Message-Id: <1455423047-8571-1-git-send-email-slaoub@gmail.com>
Date: Sun, 14 Feb 2016 12:10:47 +0800
From: Chen Yucong <slaoub@...il.com>
To: mingo@...nel.org
Cc: tglx@...utronix.de, hpa@...or.com, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v2] x86/kernel: use pr_<level>() and dev_<level>()
The files under arch/x86/kernel/ use a mixture of printk(KERN_<level> ...) and
pr_<level>(). This patch converts the bulk of the printk(KERN_<level> ...) calls
to pr_<level>() and uses dev_dbg() instead of dev_printk(KERN_DEBUG, ...). All
pr_warning() calls have been replaced with pr_warn().

Not sure what to do about printk(KERN_DEFAULT) and plain printk() calls without
a log level; those are left untouched for now.
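
For illustration, a typical conversion (taken from the acpi/boot.c hunks in the
diff below, and relying on the new pr_fmt() definition to keep the "ACPI: "
prefix) looks like:

-#define PREFIX "ACPI: "
+#undef pr_fmt
+#define pr_fmt(fmt) "ACPI: " fmt
 ...
-	printk(KERN_WARNING PREFIX "Unable to map MADT\n");
+	pr_warn("Unable to map MADT\n");

The dev_printk(KERN_DEBUG, ...) sites are converted in the same way; as a
hypothetical example (those hunks are not quoted in this excerpt),
dev_printk(KERN_DEBUG, dev, "msg\n") becomes dev_dbg(dev, "msg\n").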
Signed-off-by: Chen Yucong <slaoub@...il.com>
---
arch/x86/kernel/acpi/boot.c | 110 ++++++++++++++++--------------------
arch/x86/kernel/acpi/cstate.c | 5 +-
arch/x86/kernel/acpi/sleep.c | 2 +-
arch/x86/kernel/alternative.c | 8 +--
arch/x86/kernel/amd_gart_64.c | 13 ++---
arch/x86/kernel/apb_timer.c | 23 ++++----
arch/x86/kernel/apic/apic.c | 43 +++++++-------
arch/x86/kernel/apic/apic_flat_64.c | 4 +-
arch/x86/kernel/apic/apic_noop.c | 2 +-
arch/x86/kernel/apic/bigsmp_32.c | 5 +-
arch/x86/kernel/apic/io_apic.c | 95 +++++++++++++++----------------
arch/x86/kernel/apic/probe_32.c | 9 ++-
arch/x86/kernel/apic/x2apic_phys.c | 2 +-
arch/x86/kernel/apic/x2apic_uv_x.c | 3 +-
arch/x86/kernel/apm_32.c | 80 ++++++++++++--------------
arch/x86/kernel/bootflag.c | 4 +-
arch/x86/kernel/check.c | 11 ++--
arch/x86/kernel/cpuid.c | 2 +-
arch/x86/kernel/crash_dump_32.c | 6 +-
arch/x86/kernel/devicetree.c | 6 +-
arch/x86/kernel/doublefault.c | 17 +++---
arch/x86/kernel/dumpstack.c | 4 +-
arch/x86/kernel/e820.c | 66 +++++++++++-----------
arch/x86/kernel/early-quirks.c | 38 +++++--------
arch/x86/kernel/early_printk.c | 4 +-
arch/x86/kernel/fpu/init.c | 3 +-
arch/x86/kernel/fpu/xstate.c | 3 +-
arch/x86/kernel/hpet.c | 33 +++++------
arch/x86/kernel/i8259.c | 5 +-
arch/x86/kernel/irq_32.c | 6 +-
arch/x86/kernel/jump_label.c | 4 +-
arch/x86/kernel/kgdb.c | 8 +--
arch/x86/kernel/kprobes/core.c | 11 ++--
arch/x86/kernel/kvm.c | 12 ++--
arch/x86/kernel/kvmclock.c | 10 ++--
arch/x86/kernel/mmconf-fam10h_64.c | 2 +-
arch/x86/kernel/module.c | 4 +-
arch/x86/kernel/nmi_selftest.c | 12 ++--
arch/x86/kernel/paravirt.c | 2 +-
arch/x86/kernel/pci-calgary_64.c | 87 ++++++++++++++--------------
arch/x86/kernel/pci-iommu_table.c | 4 +-
arch/x86/kernel/pci-nommu.c | 3 +-
arch/x86/kernel/pci-swiotlb.c | 3 +-
arch/x86/kernel/quirks.c | 48 ++++++++--------
arch/x86/kernel/rtc.c | 7 +--
arch/x86/kernel/setup.c | 22 ++++----
arch/x86/kernel/setup_percpu.c | 2 +-
arch/x86/kernel/smpboot.c | 6 +-
arch/x86/kernel/sysfb_efi.c | 7 +--
arch/x86/kernel/sysfb_simplefb.c | 2 +-
arch/x86/kernel/tboot.c | 16 +++---
arch/x86/kernel/tce_64.c | 5 +-
arch/x86/kernel/test_nx.c | 16 +++---
arch/x86/kernel/test_rodata.c | 10 ++--
arch/x86/kernel/tsc_sync.c | 6 +-
arch/x86/kernel/vsmp_64.c | 2 +-
arch/x86/pci/mmconfig-shared.c | 32 +++++------
arch/x86/pci/mmconfig_64.c | 4 +-
58 files changed, 448 insertions(+), 511 deletions(-)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index e759076..cb3afc7 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -55,7 +55,8 @@ EXPORT_SYMBOL(acpi_disabled);
# include <asm/proto.h>
#endif /* X86 */
-#define PREFIX "ACPI: "
+#undef pr_fmt
+#define pr_fmt(fmt) "ACPI: " fmt
int acpi_noirq; /* skip ACPI IRQ initialization */
int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */
@@ -141,14 +142,14 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
madt = (struct acpi_table_madt *)table;
if (!madt) {
- printk(KERN_WARNING PREFIX "Unable to map MADT\n");
+ pr_warn("Unable to map MADT\n");
return -ENODEV;
}
if (madt->address) {
acpi_lapic_addr = (u64) madt->address;
- printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
+ pr_debug("Local APIC address 0x%08x\n",
madt->address);
}
@@ -170,7 +171,7 @@ static int acpi_register_lapic(int id, u8 enabled)
unsigned int ver = 0;
if (id >= MAX_LOCAL_APIC) {
- printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
+ pr_info("skipped apicid that is too big\n");
return -EINVAL;
}
@@ -210,11 +211,11 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
* when we use CPU hotplug.
*/
if (!apic->apic_id_valid(apic_id) && enabled)
- printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
+ pr_warn("x2apic entry ignored\n");
else
acpi_register_lapic(apic_id, enabled);
#else
- printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
+ pr_warn("x2apic entry ignored\n");
#endif
return 0;
@@ -293,7 +294,7 @@ acpi_parse_x2apic_nmi(struct acpi_subtable_header *header,
acpi_table_print_madt_entry(header);
if (x2apic_nmi->lint != 1)
- printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+ pr_warn("NMI not connected to LINT 1!\n");
return 0;
}
@@ -311,7 +312,7 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e
acpi_table_print_madt_entry(header);
if (lapic_nmi->lint != 1)
- printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+ pr_warn("NMI not connected to LINT 1!\n");
return 0;
}
@@ -478,14 +479,14 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
if (intsrc->source_irq == 0) {
if (acpi_skip_timer_override) {
- printk(PREFIX "BIOS IRQ0 override ignored.\n");
+ pr_info("BIOS IRQ0 override ignored.\n");
return 0;
}
if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
&& (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
- printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+ pr_info("BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
}
}
@@ -561,7 +562,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
if (old == new)
return;
- printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
+ pr_info("setting ELCR to %04x (from %04x)\n", new, old);
outb(new, 0x4d0);
outb(new >> 8, 0x4d1);
}
@@ -717,7 +718,7 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu)
cpu = acpi_register_lapic(physid, ACPI_MADT_ENABLED);
if (cpu < 0) {
- pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+ pr_info("Unable to map lapic to logical cpu number\n");
return cpu;
}
@@ -833,8 +834,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
- printk(KERN_WARNING PREFIX "HPET timers must be located in "
- "memory.\n");
+ pr_warn("HPET timers must be located in memory.\n");
return -1;
}
@@ -846,9 +846,8 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
* want to allocate a resource there.
*/
if (!hpet_address) {
- printk(KERN_WARNING PREFIX
- "HPET id: %#x base: %#lx is invalid\n",
- hpet_tbl->id, hpet_address);
+ pr_warn("HPET id: %#x base: %#lx is invalid\n",
+ hpet_tbl->id, hpet_address);
return 0;
}
#ifdef CONFIG_X86_64
@@ -859,20 +858,19 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
*/
if (hpet_address == 0xfed0000000000000UL) {
if (!hpet_force_user) {
- printk(KERN_WARNING PREFIX "HPET id: %#x "
+ pr_warn("HPET id: %#x "
"base: 0xfed0000000000000 is bogus\n "
"try hpet=force on the kernel command line to "
"fix it up to 0xfed00000.\n", hpet_tbl->id);
hpet_address = 0;
return 0;
}
- printk(KERN_WARNING PREFIX
- "HPET id: %#x base: 0xfed0000000000000 fixed up "
- "to 0xfed00000.\n", hpet_tbl->id);
+ pr_warn("HPET id: %#x base: 0xfed0000000000000 fixed up to 0xfed00000.\n",
+ hpet_tbl->id);
hpet_address >>= 32;
}
#endif
- printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
+ pr_info("HPET id: %#x base: %#lx\n",
hpet_tbl->id, hpet_address);
/*
@@ -935,7 +933,7 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
}
if (pmtmr_ioport)
- printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
+ pr_info("PM-Timer IO Port: %#x\n",
pmtmr_ioport);
#endif
return 0;
@@ -962,8 +960,7 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
acpi_parse_lapic_addr_ovr, 0);
if (count < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing LAPIC address override entry\n");
+ pr_err("Error parsing LAPIC address override entry\n");
return count;
}
@@ -990,8 +987,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
acpi_parse_lapic_addr_ovr, 0);
if (count < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing LAPIC address override entry\n");
+ pr_err("Error parsing LAPIC address override entry\n");
return count;
}
@@ -1010,8 +1006,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
sizeof(struct acpi_table_madt),
madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC);
if (ret < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing LAPIC/X2APIC entries\n");
+ pr_err("Error parsing LAPIC/X2APIC entries\n");
return ret;
}
@@ -1019,11 +1014,11 @@ static int __init acpi_parse_madt_lapic_entries(void)
count = madt_proc[1].count;
}
if (!count && !x2count) {
- printk(KERN_ERR PREFIX "No LAPIC entries present\n");
+ pr_err("No LAPIC entries present\n");
/* TBD: Cleanup to allow fallback to MPS */
return -ENODEV;
} else if (count < 0 || x2count < 0) {
- printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
+ pr_err("Error parsing LAPIC entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1033,7 +1028,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
acpi_parse_lapic_nmi, 0);
if (count < 0 || x2count < 0) {
- printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
+ pr_err("Error parsing LAPIC NMI entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1092,7 +1087,7 @@ static void __init mp_config_acpi_legacy_irqs(void)
}
if (idx != mp_irq_entries) {
- printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
+ pr_debug("ACPI: IRQ%d used by override.\n", i);
continue; /* IRQ already used */
}
@@ -1132,26 +1127,24 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* if "noapic" boot option, don't look for IO-APICs
*/
if (skip_ioapic_setup) {
- printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
- "due to 'noapic' option.\n");
+ pr_info("Skipping IOAPIC probe due to 'noapic' option.\n");
return -ENODEV;
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
MAX_IO_APICS);
if (!count) {
- printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
+ pr_err("No IOAPIC entries present\n");
return -ENODEV;
} else if (count < 0) {
- printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
+ pr_err("Error parsing IOAPIC entry\n");
return count;
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
acpi_parse_int_src_ovr, nr_irqs);
if (count < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing interrupt source overrides entry\n");
+ pr_err("Error parsing interrupt source overrides entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1170,7 +1163,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
acpi_parse_nmi_src, nr_irqs);
if (count < 0) {
- printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
+ pr_err("Error parsing NMI SRC entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1203,8 +1196,7 @@ static void __init early_acpi_process_madt(void)
/*
* Dell Precision Workstation 410, 610 come here.
*/
- printk(KERN_ERR PREFIX
- "Invalid BIOS MADT, disabling ACPI\n");
+ pr_err("Invalid BIOS MADT, disabling ACPI\n");
disable_acpi();
}
}
@@ -1241,8 +1233,7 @@ static void __init acpi_process_madt(void)
/*
* Dell Precision Workstation 410, 610 come here.
*/
- printk(KERN_ERR PREFIX
- "Invalid BIOS MADT, disabling ACPI\n");
+ pr_err("Invalid BIOS MADT, disabling ACPI\n");
disable_acpi();
}
} else {
@@ -1252,8 +1243,7 @@ static void __init acpi_process_madt(void)
* Boot with "acpi=off" to use MPS on such a system.
*/
if (smp_found_config) {
- printk(KERN_WARNING PREFIX
- "No APIC-table, disabling MPS\n");
+ pr_warn("No APIC-table, disabling MPS\n");
smp_found_config = 0;
}
}
@@ -1263,11 +1253,9 @@ static void __init acpi_process_madt(void)
* processors, where MPS only supports physical.
*/
if (acpi_lapic && acpi_ioapic)
- printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
- "information\n");
+ pr_info("Using ACPI (MADT) for SMP configuration information\n");
else if (acpi_lapic)
- printk(KERN_INFO "Using ACPI for processor (LAPIC) "
- "configuration information\n");
+ pr_info("Using ACPI for processor (LAPIC) configuration information\n");
#endif
return;
}
@@ -1275,8 +1263,8 @@ static void __init acpi_process_madt(void)
static int __init disable_acpi_irq(const struct dmi_system_id *d)
{
if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
- d->ident);
+ pr_notice("%s detected: force use of acpi=noirq\n",
+ d->ident);
acpi_noirq_set();
}
return 0;
@@ -1285,8 +1273,8 @@ static int __init disable_acpi_irq(const struct dmi_system_id *d)
static int __init disable_acpi_pci(const struct dmi_system_id *d)
{
if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
- d->ident);
+ pr_notice("%s detected: force use of pci=noacpi\n",
+ d->ident);
acpi_disable_pci();
}
return 0;
@@ -1295,11 +1283,10 @@ static int __init disable_acpi_pci(const struct dmi_system_id *d)
static int __init dmi_disable_acpi(const struct dmi_system_id *d)
{
if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
+ pr_notice("%s detected: acpi off\n", d->ident);
disable_acpi();
} else {
- printk(KERN_NOTICE
- "Warning: DMI blacklist says broken, but acpi forced\n");
+ pr_notice("Warning: DMI blacklist says broken, but acpi forced\n");
}
return 0;
}
@@ -1514,9 +1501,9 @@ void __init acpi_boot_table_init(void)
*/
if (acpi_blacklisted()) {
if (acpi_force) {
- printk(KERN_WARNING PREFIX "acpi=force override\n");
+ pr_warn("acpi=force override\n");
} else {
- printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
+ pr_warn("Disabling ACPI support\n");
disable_acpi();
return;
}
@@ -1630,9 +1617,8 @@ int __init acpi_mps_check(void)
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
/* mptable code is not built-in*/
if (acpi_disabled || acpi_noirq) {
- printk(KERN_WARNING "MPS support code is not built-in.\n"
- "Using acpi=off or acpi=noirq or pci=noacpi "
- "may have problem\n");
+ pr_warn("MPS support code is not built-in.\n"
+ "Using acpi=off or acpi=noirq or pci=noacpi may have problem\n");
return 1;
}
#endif
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 4b28159..d94cb2a 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -103,9 +103,8 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
if (!mwait_supported[cstate_type]) {
mwait_supported[cstate_type] = 1;
- printk(KERN_DEBUG
- "Monitor-Mwait will be used to enter C-%d "
- "state\n", cx->type);
+ pr_debug("Monitor-Mwait will be used to enter C-%d state\n",
+ cx->type);
}
snprintf(cx->desc,
ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index d1daead..a850963 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -48,7 +48,7 @@ int x86_acpi_suspend_lowlevel(void)
(struct wakeup_header *) __va(real_mode_header->wakeup_header);
if (header->signature != WAKEUP_HEADER_SIGNATURE) {
- printk(KERN_ERR "wakeup header does not match\n");
+ pr_err("wakeup header does not match\n");
return -EINVAL;
}
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 25f9093..0ecb579 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -59,7 +59,7 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt);
#define DPRINTK(fmt, args...) \
do { \
if (debug_alternative) \
- printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args); \
+ pr_debug("%s: " fmt "\n", __func__, ##args); \
} while (0)
#define DUMP_BYTES(buf, len, fmt, args...) \
@@ -70,10 +70,10 @@ do { \
if (!(len)) \
break; \
\
- printk(KERN_DEBUG fmt, ##args); \
+ pr_debug(fmt, ##args); \
for (j = 0; j < (len) - 1; j++) \
- printk(KERN_CONT "%02hhx ", buf[j]); \
- printk(KERN_CONT "%02hhx\n", buf[j]); \
+ pr_cont("%02hhx ", buf[j]); \
+ pr_cont("%02hhx\n", buf[j]); \
} \
} while (0)
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 8e3842f..24fb9cf 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -536,10 +536,9 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;
if (iommu_size < 64*1024*1024) {
- pr_warning(
- "PCI-DMA: Warning: Small IOMMU %luMB."
+ pr_warn("PCI-DMA: Warning: Small IOMMU %luMB."
" Consider increasing the AGP aperture in BIOS\n",
- iommu_size >> 20);
+ iommu_size >> 20);
}
return iommu_size;
@@ -691,8 +690,8 @@ static __init int init_amd_gatt(struct agp_kern_info *info)
nommu:
/* Should not happen anymore */
- pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
- "falling back to iommu=soft.\n");
+ pr_warn("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
+ "falling back to iommu=soft.\n");
return -1;
}
@@ -757,8 +756,8 @@ int __init gart_iommu_init(void)
!gart_iommu_aperture ||
(no_agp && init_amd_gatt(&info) < 0)) {
if (max_pfn > MAX_DMA32_PFN) {
- pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
- pr_warning("falling back to iommu=soft.\n");
+ pr_warn("More than 4GB of memory but GART IOMMU not available.\n");
+ pr_warn("falling back to iommu=soft.\n");
}
return 0;
}
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 222a570..c5b378a 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -90,13 +90,13 @@ static inline void apbt_set_mapping(void)
}
mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
if (mtmr == NULL) {
- printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
+ pr_err("Failed to get MTMR %d from SFI\n",
APBT_CLOCKEVENT0_NUM);
return;
}
apbt_address = (phys_addr_t)mtmr->phys_addr;
if (!apbt_address) {
- printk(KERN_WARNING "No timer base from SFI, use default\n");
+ pr_warn("No timer base from SFI, use default\n");
apbt_address = APBT_DEFAULT_BASE;
}
apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE);
@@ -142,7 +142,7 @@ static int __init apbt_clockevent_register(void)
mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
if (mtmr == NULL) {
- printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
+ pr_err("Failed to get MTMR %d from SFI\n",
APBT_CLOCKEVENT0_NUM);
return -ENODEV;
}
@@ -157,8 +157,8 @@ static int __init apbt_clockevent_register(void)
if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
global_clock_event = &adev->timer->ced;
- printk(KERN_DEBUG "%s clockevent registered as global\n",
- global_clock_event->name);
+ pr_debug("%s clockevent registered as global\n",
+ global_clock_event->name);
}
dw_apb_clockevent_register(adev->timer);
@@ -196,8 +196,8 @@ void apbt_setup_secondary_clock(void)
dw_apb_clockevent_resume(adev->timer);
}
- printk(KERN_INFO "Registering CPU %d clockevent device %s, cpu %08x\n",
- cpu, adev->name, adev->cpu);
+ pr_info("Registering CPU %d clockevent device %s, cpu %08x\n",
+ cpu, adev->name, adev->cpu);
apbt_setup_irq(adev);
dw_apb_clockevent_register(adev->timer);
@@ -327,7 +327,7 @@ void __init apbt_time_init(void)
#ifdef CONFIG_SMP
/* kernel cmdline disable apb timer, so we will use lapic timers */
if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
- printk(KERN_INFO "apbt: disabled per cpu timer\n");
+ pr_info("apbt: disabled per cpu timer\n");
return;
}
pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
@@ -346,7 +346,7 @@ void __init apbt_time_init(void)
if (p_mtmr)
adev->irq = p_mtmr->irq;
else
- printk(KERN_ERR "Failed to get timer for cpu %d\n", i);
+ pr_err("Failed to get timer for cpu %d\n", i);
snprintf(adev->name, sizeof(adev->name) - 1, "apbt%d", i);
}
#endif
@@ -400,13 +400,12 @@ unsigned long apbt_quick_calibrate(void)
shift = 5;
if (unlikely(loop >> shift == 0)) {
- printk(KERN_INFO
- "APBT TSC calibration failed, not enough resolution\n");
+ pr_info("APBT TSC calibration failed, not enough resolution\n");
return 0;
}
scale = (int)div_u64((t2 - t1), loop >> shift);
khz = (scale * (apbt_freq / 1000)) >> shift;
- printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz);
+ pr_info("TSC freq calculated by APB timer is %lu khz\n", khz);
return khz;
failed:
return 0;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 8a5cdda..4419c52 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -349,7 +349,7 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
*/
asm volatile("mfence" : : : "memory");
- printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
+ pr_debug_once("TSC deadline timer enabled\n");
return;
}
@@ -657,8 +657,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
res = (((u64)deltapm) * mult) >> 22;
do_div(res, 1000000);
- pr_warning("APIC calibration not consistent "
- "with PM-Timer: %ldms instead of 100ms\n",(long)res);
+ pr_warn("APIC calibration not consistent with PM-Timer: %ldms instead of 100ms\n",
+ (long)res);
/* Correct the lapic counter value */
res = (((u64)(*delta)) * pm_100ms);
@@ -777,7 +777,7 @@ static int __init calibrate_APIC_clock(void)
*/
if (lapic_timer_frequency < (1000000 / HZ)) {
local_irq_enable();
- pr_warning("APIC frequency too slow, disabling apic timer\n");
+ pr_warn("APIC frequency too slow, disabling apic timer\n");
return -1;
}
@@ -820,7 +820,7 @@ static int __init calibrate_APIC_clock(void)
local_irq_enable();
if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
- pr_warning("APIC timer disabled due to verification failure\n");
+ pr_warn("APIC timer disabled due to verification failure\n");
return -1;
}
@@ -893,7 +893,7 @@ static void local_apic_timer_interrupt(void)
* spurious.
*/
if (!evt->event_handler) {
- pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
+ pr_warn("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
/* Switch it off */
lapic_timer_shutdown(evt);
return;
@@ -1306,7 +1306,7 @@ void setup_local_APIC(void)
}
}
if (acked > 256) {
- printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
+ pr_err("LAPIC pending interrupts after %d EOI\n",
acked);
break;
}
@@ -1454,7 +1454,7 @@ static void __x2apic_disable(void)
/* Disable xapic and x2apic first and then reenable xapic mode */
wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
- printk_once(KERN_INFO "x2apic disabled\n");
+ pr_info_once("x2apic disabled\n");
}
static void __x2apic_enable(void)
@@ -1465,7 +1465,7 @@ static void __x2apic_enable(void)
if (msr & X2APIC_ENABLE)
return;
wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
- printk_once(KERN_INFO "x2apic enabled\n");
+ pr_info_once("x2apic enabled\n");
}
static int __init setup_nox2apic(char *str)
@@ -1474,11 +1474,11 @@ static int __init setup_nox2apic(char *str)
int apicid = native_apic_msr_read(APIC_ID);
if (apicid >= 255) {
- pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
+ pr_warn("Apicid: %08x, cannot enforce nox2apic\n",
apicid);
return 0;
}
- pr_warning("x2apic already enabled.\n");
+ pr_warn("x2apic already enabled.\n");
__x2apic_disable();
}
setup_clear_cpu_cap(X86_FEATURE_X2APIC);
@@ -1652,7 +1652,7 @@ static int __init apic_verify(void)
*/
features = cpuid_edx(1);
if (!(features & (1 << X86_FEATURE_APIC))) {
- pr_warning("Could not enable APIC!\n");
+ pr_warn("Could not enable APIC!\n");
return -1;
}
set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
@@ -2028,9 +2028,8 @@ int generic_processor_info(int apicid, int version)
disabled_cpu_apicid == apicid) {
int thiscpu = num_processors + disabled_cpus;
- pr_warning("APIC: Disabling requested cpu."
- " Processor %d/0x%x ignored.\n",
- thiscpu, apicid);
+ pr_warn("APIC: Disabling requested cpu. Processor %d/0x%x ignored.\n",
+ thiscpu, apicid);
disabled_cpus++;
return -ENODEV;
@@ -2044,8 +2043,7 @@ int generic_processor_info(int apicid, int version)
apicid != boot_cpu_physical_apicid) {
int thiscpu = max + disabled_cpus - 1;
- pr_warning(
- "ACPI: NR_CPUS/possible_cpus limit of %i almost"
+ pr_warn("ACPI: NR_CPUS/possible_cpus limit of %i almost"
" reached. Keeping one slot for boot cpu."
" Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
@@ -2056,8 +2054,7 @@ int generic_processor_info(int apicid, int version)
if (num_processors >= nr_cpu_ids) {
int thiscpu = max + disabled_cpus;
- pr_warning(
- "ACPI: NR_CPUS/possible_cpus limit of %i reached."
+ pr_warn("ACPI: NR_CPUS/possible_cpus limit of %i reached."
" Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
disabled_cpus++;
@@ -2081,14 +2078,14 @@ int generic_processor_info(int apicid, int version)
* Validate version
*/
if (version == 0x0) {
- pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
- cpu, apicid);
+ pr_warn("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
+ cpu, apicid);
version = 0x10;
}
apic_version[apicid] = version;
if (version != apic_version[boot_cpu_physical_apicid]) {
- pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
+ pr_warn("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
apic_version[boot_cpu_physical_apicid], cpu, version);
}
@@ -2531,7 +2528,7 @@ static int __init apic_set_verbosity(char *arg)
else if (strcmp("verbose", arg) == 0)
apic_verbosity = APIC_VERBOSE;
else {
- pr_warning("APIC Verbosity level %s not recognised"
+ pr_warn("APIC Verbosity level %s not recognised"
" use apic=verbose or apic=debug\n", arg);
return -EINVAL;
}
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 9968f30..1af714c 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -218,12 +218,12 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
*/
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
- printk(KERN_DEBUG "system APIC only can use physical flat");
+ pr_debug("system APIC only can use physical flat");
return 1;
}
if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) {
- printk(KERN_DEBUG "IBM Summit detected, will use apic physical");
+ pr_debug("IBM Summit detected, will use apic physical");
return 1;
}
#endif
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index 331a7a0..80380d4 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -94,7 +94,7 @@ static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask,
const struct cpumask *mask)
{
if (cpu != 0)
- pr_warning("APIC: Vector allocated for non-BSP cpu\n");
+ pr_warn("APIC: Vector allocated for non-BSP cpu\n");
cpumask_copy(retmask, cpumask_of(cpu));
}
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index cf9bd89..52f0fb5 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -67,8 +67,7 @@ static void bigsmp_init_apic_ldr(void)
static void bigsmp_setup_apic_routing(void)
{
- printk(KERN_INFO
- "Enabling APIC mode: Physflat. Using %d I/O APICs\n",
+ pr_info("Enabling APIC mode: Physflat. Using %d I/O APICs\n",
nr_ioapics);
}
@@ -110,7 +109,7 @@ static int dmi_bigsmp; /* can be set by dmi scanners */
static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
- printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
+ pr_notice("%s detected: force use of apic=bigsmp\n", d->ident);
dmi_bigsmp = 1;
return 0;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index fdb0fbf..71b05654 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1253,7 +1253,7 @@ static void io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
struct IO_APIC_route_entry entry;
struct IR_IO_APIC_route_entry *ir_entry = (void *)&entry;
- printk(KERN_DEBUG "IOAPIC %d:\n", apic);
+ pr_debug("IOAPIC %d:\n", apic);
for (i = 0; i <= nr_entries; i++) {
entry = ioapic_read_entry(apic, i);
snprintf(buf, sizeof(buf),
@@ -1264,15 +1264,14 @@ static void io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
entry.polarity == IOAPIC_POL_LOW ? "low " : "high",
entry.vector, entry.irr, entry.delivery_status);
if (ir_entry->format)
- printk(KERN_DEBUG "%s, remapped, I(%04X), Z(%X)\n",
- buf, (ir_entry->index << 15) | ir_entry->index,
- ir_entry->zero);
+ pr_debug("%s, remapped, I(%04X), Z(%X)\n", buf,
+ (ir_entry->index << 15) | ir_entry->index,
+ ir_entry->zero);
else
- printk(KERN_DEBUG "%s, %s, D(%02X), M(%1d)\n",
- buf,
- entry.dest_mode == IOAPIC_DEST_MODE_LOGICAL ?
- "logical " : "physical",
- entry.dest, entry.delivery_mode);
+ pr_debug("%s, %s, D(%02X), M(%1d)\n", buf,
+ entry.dest_mode == IOAPIC_DEST_MODE_LOGICAL ?
+ "logical " : "physical",
+ entry.dest, entry.delivery_mode);
}
}
@@ -1293,19 +1292,19 @@ static void __init print_IO_APIC(int ioapic_idx)
reg_03.raw = io_apic_read(ioapic_idx, 3);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
- printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
- printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
- printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
- printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
- printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
+ pr_debug("IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
+ pr_debug(".... register #00: %08X\n", reg_00.raw);
+ pr_debug("....... : physical APIC id: %02X\n", reg_00.bits.ID);
+ pr_debug("....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
+ pr_debug("....... : LTS : %X\n", reg_00.bits.LTS);
- printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
- printk(KERN_DEBUG "....... : max redirection entries: %02X\n",
- reg_01.bits.entries);
+ pr_debug(".... register #01: %08X\n", *(int *)®_01);
+ pr_debug("....... : max redirection entries: %02X\n",
+ reg_01.bits.entries);
- printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
- printk(KERN_DEBUG "....... : IO APIC version: %02X\n",
- reg_01.bits.version);
+ pr_debug("....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
+ pr_debug("....... : IO APIC version: %02X\n",
+ reg_01.bits.version);
/*
* Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
@@ -1313,8 +1312,9 @@ static void __init print_IO_APIC(int ioapic_idx)
* value, so ignore it if reg_02 == reg_01.
*/
if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
- printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
- printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
+ pr_debug(".... register #02: %08X\n", reg_02.raw);
+ pr_debug("....... : arbitration: %02X\n",
+ reg_02.bits.arbitration);
}
/*
@@ -1324,11 +1324,12 @@ static void __init print_IO_APIC(int ioapic_idx)
*/
if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
reg_03.raw != reg_01.raw) {
- printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
- printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
+ pr_debug(".... register #03: %08X\n", reg_03.raw);
+ pr_debug("....... : Boot DT : %X\n",
+ reg_03.bits.boot_DT);
}
- printk(KERN_DEBUG ".... IRQ redirection table:\n");
+ pr_debug(".... IRQ redirection table:\n");
io_apic_print_entries(ioapic_idx, reg_01.bits.entries);
}
@@ -1337,22 +1338,22 @@ void __init print_IO_APICs(void)
int ioapic_idx;
unsigned int irq;
- printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
+ pr_debug("number of MP IRQ sources: %d.\n", mp_irq_entries);
for_each_ioapic(ioapic_idx)
- printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
- mpc_ioapic_id(ioapic_idx),
- ioapics[ioapic_idx].nr_registers);
+ pr_debug("number of IO-APIC #%d registers: %d.\n",
+ mpc_ioapic_id(ioapic_idx),
+ ioapics[ioapic_idx].nr_registers);
/*
* We are a bit conservative about what we expect. We have to
* know about every hardware change ASAP.
*/
- printk(KERN_INFO "testing the IO APIC.......................\n");
+ pr_info("testing the IO APIC.......................\n");
for_each_ioapic(ioapic_idx)
print_IO_APIC(ioapic_idx);
- printk(KERN_DEBUG "IRQ to pin mappings:\n");
+ pr_debug("IRQ to pin mappings:\n");
for_each_active_irq(irq) {
struct irq_pin_list *entry;
struct irq_chip *chip;
@@ -1367,13 +1368,13 @@ void __init print_IO_APICs(void)
if (list_empty(&data->irq_2_pin))
continue;
- printk(KERN_DEBUG "IRQ%d ", irq);
+ pr_debug("IRQ%d ", irq);
for_each_irq_pin(entry, data->irq_2_pin)
pr_cont("-> %d:%d", entry->apic, entry->pin);
pr_cont("\n");
}
- printk(KERN_INFO ".................................... done.\n");
+ pr_info(".................................... done.\n");
}
/* Where if anywhere is the i8259 connect in external int mode */
@@ -1413,7 +1414,7 @@ void __init enable_IO_APIC(void)
i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
/* Trust the MP table if nothing is setup in the hardware */
if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
- printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
+ pr_warn("ExtINT not setup in hardware but reported by MP table\n");
ioapic_i8259.pin = i8259_pin;
ioapic_i8259.apic = i8259_apic;
}
@@ -1421,7 +1422,7 @@ void __init enable_IO_APIC(void)
if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
{
- printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
+ pr_warn("ExtINT in hardware and MP table differ\n");
}
/*
@@ -1508,9 +1509,9 @@ void __init setup_ioapic_ids_from_mpc_nocheck(void)
old_id = mpc_ioapic_id(ioapic_idx);
if (mpc_ioapic_id(ioapic_idx) >= get_physical_broadcast()) {
- printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
+ pr_err("BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
ioapic_idx, mpc_ioapic_id(ioapic_idx));
- printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
+ pr_err("... fixing up to %d. (tell your hw vendor)\n",
reg_00.bits.ID);
ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID;
}
@@ -1522,14 +1523,14 @@ void __init setup_ioapic_ids_from_mpc_nocheck(void)
*/
if (apic->check_apicid_used(&phys_id_present_map,
mpc_ioapic_id(ioapic_idx))) {
- printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
+ pr_err("BIOS bug, IO-APIC#%d ID %d is already used!...\n",
ioapic_idx, mpc_ioapic_id(ioapic_idx));
for (i = 0; i < get_physical_broadcast(); i++)
if (!physid_isset(i, phys_id_present_map))
break;
if (i >= get_physical_broadcast())
panic("Max APIC ID exceeded!\n");
- printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
+ pr_err("... fixing up to %d. (tell your hw vendor)\n",
i);
physid_set(i, phys_id_present_map);
ioapics[ioapic_idx].mp_config.apicid = i;
@@ -2369,8 +2370,8 @@ static int io_apic_get_unique_id(int ioapic, int apic_id)
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
if (apic_id >= get_physical_broadcast()) {
- printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
- "%d\n", ioapic, apic_id, reg_00.bits.ID);
+ pr_warn("IOAPIC[%d]: Invalid apic_id %d, trying %d\n",
+ ioapic, apic_id, reg_00.bits.ID);
apic_id = reg_00.bits.ID;
}
@@ -2388,8 +2389,8 @@ static int io_apic_get_unique_id(int ioapic, int apic_id)
if (i == get_physical_broadcast())
panic("Max apic_id exceeded!\n");
- printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
- "trying %d\n", ioapic, apic_id, i);
+ pr_warn("IOAPIC[%d]: apic_id %d already used, trying %d\n",
+ ioapic, apic_id, i);
apic_id = i;
}
@@ -2609,8 +2610,7 @@ void __init io_apic_init_mappings(void)
ioapic_phys = mpc_ioapic_addr(i);
#ifdef CONFIG_X86_32
if (!ioapic_phys) {
- printk(KERN_ERR
- "WARNING: bogus zero IO-APIC "
+ pr_err("WARNING: bogus zero IO-APIC "
"address found in MPTABLE, "
"disabling IO/APIC support!\n");
smp_found_config = 0;
@@ -2644,8 +2644,7 @@ void __init ioapic_insert_resources(void)
if (!r) {
if (nr_ioapics > 0)
- printk(KERN_ERR
- "IO APIC resources couldn't be allocated.\n");
+ pr_err("IO APIC resources couldn't be allocated.\n");
return;
}
@@ -2669,7 +2668,7 @@ int mp_find_ioapic(u32 gsi)
return i;
}
- printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
+ pr_err("ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
return -1;
}
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index f316e34..fd2a3dc 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -60,8 +60,7 @@ static int default_x86_32_early_logical_apicid(int cpu)
static void setup_apic_flat_routing(void)
{
#ifdef CONFIG_X86_IO_APIC
- printk(KERN_INFO
- "Enabling APIC mode: Flat. Using %d I/O APICs\n",
+ pr_info("Enabling APIC mode: Flat. Using %d I/O APICs\n",
nr_ioapics);
#endif
}
@@ -202,7 +201,7 @@ void __init generic_apic_probe(void)
if (drv == __apicdrivers_end)
panic("Didn't find an APIC driver");
}
- printk(KERN_INFO "Using APIC driver %s\n", apic->name);
+ pr_info("Using APIC driver %s\n", apic->name);
}
/* This function can switch the APIC even after the initial ->probe() */
@@ -218,8 +217,8 @@ int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
if (!cmdline_apic) {
apic = *drv;
- printk(KERN_INFO "Switched to APIC driver `%s'.\n",
- apic->name);
+ pr_info("Switched to APIC driver `%s'.\n",
+ apic->name);
}
return 1;
}
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index a1242e2..e7b2d58 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -24,7 +24,7 @@ static bool x2apic_fadt_phys(void)
#ifdef CONFIG_ACPI
if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
- printk(KERN_DEBUG "System requires x2apic physical mode\n");
+ pr_debug("System requires x2apic physical mode\n");
return true;
}
#endif
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 624db005..81a856d 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -725,8 +725,7 @@ static __init void uv_rtc_init(void)
status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
&ticks_per_sec);
if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
- printk(KERN_WARNING
- "unable to determine platform RTC clock frequency, "
+ pr_warn("unable to determine platform RTC clock frequency, "
"guessing.\n");
/* BIOS gives wrong value for clock freq. so guess */
sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 052c9c3..3ed14f5 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -857,7 +857,7 @@ static int apm_do_idle(void)
* Only report the failure the first 5 times.
*/
if (++t < 5) {
- printk(KERN_DEBUG "apm_do_idle failed (%d)\n", err);
+ pr_debug("apm_do_idle failed (%d)\n", err);
t = jiffies;
}
return -1;
@@ -1326,11 +1326,11 @@ static void check_events(void)
while ((event = get_event()) != 0) {
if (debug) {
if (event <= NR_APM_EVENT_NAME)
- printk(KERN_DEBUG "apm: received %s notify\n",
- apm_event_name[event - 1]);
+ pr_debug("apm: received %s notify\n",
+ apm_event_name[event - 1]);
else
- printk(KERN_DEBUG "apm: received unknown "
- "event 0x%02x\n", event);
+ pr_debug("apm: received unknown event 0x%02x\n",
+ event);
}
if (ignore_bounce
&& (time_after(jiffies, last_resume + bounce_interval)))
@@ -1416,7 +1416,7 @@ static void apm_event_handler(void)
(pending_count-- <= 0)) {
pending_count = 4;
if (debug)
- printk(KERN_DEBUG "apm: setting state busy\n");
+ pr_debug("apm: setting state busy\n");
err = set_system_power_state(APM_STATE_BUSY);
if (err)
apm_error("busy", err);
@@ -1760,7 +1760,7 @@ static int apm(void *unused)
}
if (debug)
- printk(KERN_INFO "apm: Connection version %d.%d\n",
+ pr_info("apm: Connection version %d.%d\n",
(apm_info.connection_version >> 8) & 0xff,
apm_info.connection_version & 0xff);
@@ -1791,7 +1791,7 @@ static int apm(void *unused)
if (debug && (num_online_cpus() == 1 || smp)) {
error = apm_get_power_status(&bx, &cx, &dx);
if (error)
- printk(KERN_INFO "apm: power status not available\n");
+ pr_info("apm: power status not available\n");
else {
switch ((bx >> 8) & 0xff) {
case 0:
@@ -1824,17 +1824,15 @@ static int apm(void *unused)
bat_stat = "unknown";
break;
}
- printk(KERN_INFO
- "apm: AC %s, battery status %s, battery life ",
- power_stat, bat_stat);
+ pr_info("apm: AC %s, battery status %s, battery life ",
+ power_stat, bat_stat);
if ((cx & 0xff) == 0xff)
printk("unknown\n");
else
printk("%d%%\n", cx & 0xff);
if (apm_info.connection_version > 0x100) {
- printk(KERN_INFO
- "apm: battery flag 0x%02x, battery life ",
- (cx >> 8) & 0xff);
+ pr_info("apm: battery flag 0x%02x, battery life ",
+ (cx >> 8) & 0xff);
if (dx == 0xffff)
printk("unknown\n");
else
@@ -1943,8 +1941,8 @@ static int __init print_if_true(const struct dmi_system_id *d)
*/
static int __init broken_ps2_resume(const struct dmi_system_id *d)
{
- printk(KERN_INFO "%s machine detected. Mousepad Resume Bug "
- "workaround hopefully not needed.\n", d->ident);
+ pr_info("%s machine detected. Mousepad Resume Bug workaround hopefully not needed.\n",
+ d->ident);
return 0;
}
@@ -1953,8 +1951,8 @@ static int __init set_realmode_power_off(const struct dmi_system_id *d)
{
if (apm_info.realmode_power_off == 0) {
apm_info.realmode_power_off = 1;
- printk(KERN_INFO "%s bios detected. "
- "Using realmode poweroff only.\n", d->ident);
+ pr_info("%s bios detected. Using realmode poweroff only.\n",
+ d->ident);
}
return 0;
}
@@ -1964,8 +1962,8 @@ static int __init set_apm_ints(const struct dmi_system_id *d)
{
if (apm_info.allow_ints == 0) {
apm_info.allow_ints = 1;
- printk(KERN_INFO "%s machine detected. "
- "Enabling interrupts during APM calls.\n", d->ident);
+ pr_info("%s machine detected. Enabling interrupts during APM calls.\n",
+ d->ident);
}
return 0;
}
@@ -1975,8 +1973,7 @@ static int __init apm_is_horked(const struct dmi_system_id *d)
{
if (apm_info.disabled == 0) {
apm_info.disabled = 1;
- printk(KERN_INFO "%s machine detected. "
- "Disabling APM.\n", d->ident);
+ pr_info("%s machine detected. Disabling APM.\n", d->ident);
}
return 0;
}
@@ -1985,10 +1982,9 @@ static int __init apm_is_horked_d850md(const struct dmi_system_id *d)
{
if (apm_info.disabled == 0) {
apm_info.disabled = 1;
- printk(KERN_INFO "%s machine detected. "
- "Disabling APM.\n", d->ident);
- printk(KERN_INFO "This bug is fixed in bios P15 which is available for\n");
- printk(KERN_INFO "download from support.intel.com\n");
+ pr_info("%s machine detected. Disabling APM.\n", d->ident);
+ pr_info("This bug is fixed in bios P15 which is available for\n");
+ pr_info("download from support.intel.com\n");
}
return 0;
}
@@ -1998,8 +1994,8 @@ static int __init apm_likes_to_melt(const struct dmi_system_id *d)
{
if (apm_info.forbid_idle == 0) {
apm_info.forbid_idle = 1;
- printk(KERN_INFO "%s machine detected. "
- "Disabling APM idle calls.\n", d->ident);
+ pr_info("%s machine detected. Disabling APM idle calls.\n",
+ d->ident);
}
return 0;
}
@@ -2022,8 +2018,7 @@ static int __init apm_likes_to_melt(const struct dmi_system_id *d)
static int __init broken_apm_power(const struct dmi_system_id *d)
{
apm_info.get_power_status_broken = 1;
- printk(KERN_WARNING "BIOS strings suggest APM bugs, "
- "disabling power status reporting.\n");
+ pr_warn("BIOS strings suggest APM bugs, disabling power status reporting.\n");
return 0;
}
@@ -2034,8 +2029,7 @@ static int __init broken_apm_power(const struct dmi_system_id *d)
static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d)
{
apm_info.get_power_status_swabinminutes = 1;
- printk(KERN_WARNING "BIOS strings suggest APM reports battery life "
- "in minutes and wrong byte order.\n");
+ pr_warn("BIOS strings suggest APM reports battery life in minutes and wrong byte order.\n");
return 0;
}
@@ -2268,17 +2262,16 @@ static int __init apm_init(void)
dmi_check_system(apm_dmi_table);
if (apm_info.bios.version == 0 || paravirt_enabled() || machine_is_olpc()) {
- printk(KERN_INFO "apm: BIOS not found.\n");
+ pr_info("apm: BIOS not found.\n");
return -ENODEV;
}
- printk(KERN_INFO
- "apm: BIOS version %d.%d Flags 0x%02x (Driver version %s)\n",
- ((apm_info.bios.version >> 8) & 0xff),
- (apm_info.bios.version & 0xff),
- apm_info.bios.flags,
- driver_version);
+ pr_info("apm: BIOS version %d.%d Flags 0x%02x (Driver version %s)\n",
+ ((apm_info.bios.version >> 8) & 0xff),
+ (apm_info.bios.version & 0xff),
+ apm_info.bios.flags,
+ driver_version);
if ((apm_info.bios.flags & APM_32_BIT_SUPPORT) == 0) {
- printk(KERN_INFO "apm: no 32 bit BIOS support\n");
+ pr_info("apm: no 32 bit BIOS support\n");
return -ENODEV;
}
@@ -2304,7 +2297,7 @@ static int __init apm_init(void)
apm_info.bios.cseg_16_len = 0; /* 64k */
if (debug) {
- printk(KERN_INFO "apm: entry %x:%x cseg16 %x dseg %x",
+ pr_info("apm: entry %x:%x cseg16 %x dseg %x",
apm_info.bios.cseg, apm_info.bios.offset,
apm_info.bios.cseg_16, apm_info.bios.dseg);
if (apm_info.bios.version > 0x100)
@@ -2369,8 +2362,7 @@ static int __init apm_init(void)
wake_up_process(kapmd_task);
if (num_online_cpus() > 1 && !smp) {
- printk(KERN_NOTICE
- "apm: disabled - APM is not SMP safe (power off active).\n");
+ pr_notice("apm: disabled - APM is not SMP safe (power off active).\n");
return 0;
}
@@ -2380,7 +2372,7 @@ static int __init apm_init(void)
* control it. just log the error
*/
if (misc_register(&apm_device))
- printk(KERN_WARNING "apm: Could not register misc device.\n");
+ pr_warn("apm: Could not register misc device.\n");
if (HZ != 100)
idle_period = (idle_period * HZ) / 100;
diff --git a/arch/x86/kernel/bootflag.c b/arch/x86/kernel/bootflag.c
index 52c8e3c..e16e24e 100644
--- a/arch/x86/kernel/bootflag.c
+++ b/arch/x86/kernel/bootflag.c
@@ -41,7 +41,7 @@ static void __init sbf_write(u8 v)
if (!parity(v))
v |= SBF_PARITY;
- printk(KERN_INFO "Simple Boot Flag at 0x%x set to 0x%x\n",
+ pr_info("Simple Boot Flag at 0x%x set to 0x%x\n",
sbf_port, v);
spin_lock_irqsave(&rtc_lock, flags);
@@ -84,7 +84,7 @@ static int __init sbf_init(void)
v = sbf_read();
if (!sbf_value_valid(v)) {
- printk(KERN_WARNING "Simple Boot Flag value 0x%x read from "
+ pr_warn("Simple Boot Flag value 0x%x read from "
"CMOS RAM was invalid\n", v);
}
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 145863d..3a54dad 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -112,7 +112,8 @@ void __init setup_bios_corruption_check(void)
}
if (num_scan_areas)
- printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas);
+ pr_info("Scanning %d areas for low memory corruption\n",
+ num_scan_areas);
}
@@ -131,8 +132,8 @@ void check_for_bios_corruption(void)
for (; size; addr++, size -= sizeof(unsigned long)) {
if (!*addr)
continue;
- printk(KERN_ERR "Corrupted low memory at %p (%lx phys) = %08lx\n",
- addr, __pa(addr), *addr);
+ pr_err("Corrupted low memory at %p (%lx phys) = %08lx\n",
+ addr, __pa(addr), *addr);
corruption = 1;
*addr = 0;
}
@@ -156,8 +157,8 @@ static int start_periodic_check_for_corruption(void)
if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0)
return 0;
- printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
- corruption_check_period);
+ pr_info("Scanning for low memory corruption every %d seconds\n",
+ corruption_check_period);
/* First time we run the checks right away */
schedule_delayed_work(&bios_check_work, 0);
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 2836de3..5d0abd0 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -165,7 +165,7 @@ static int __init cpuid_init(void)
if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,
"cpu/cpuid", &cpuid_fops)) {
- printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n",
+ pr_err("cpuid: unable to get major %d for cpuid\n",
CPUID_MAJOR);
err = -EBUSY;
goto out;
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 11891ca..0deb4c7 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -65,8 +65,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
kunmap_atomic(vaddr);
} else {
if (!kdump_buf_page) {
- printk(KERN_WARNING "Kdump: Kdump buffer page not"
- " allocated\n");
+ pr_warn("Kdump: Kdump buffer page not allocated\n");
kunmap_atomic(vaddr);
return -EFAULT;
}
@@ -85,8 +84,7 @@ static int __init kdump_buf_page_init(void)
kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!kdump_buf_page) {
- printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
- " page\n");
+ pr_warn("Kdump: Failed to allocate kdump buffer page\n");
ret = -ENOMEM;
}
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 1f4acd6..8468af7 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -235,8 +235,8 @@ static void __init dtb_add_ioapic(struct device_node *dn)
ret = of_address_to_resource(dn, 0, &r);
if (ret) {
- printk(KERN_ERR "Can't obtain address from node %s.\n",
- dn->full_name);
+ pr_err("Can't obtain address from node %s.\n",
+ dn->full_name);
return;
}
mp_register_ioapic(++ioapic_id, r.start, gsi_top, &cfg);
@@ -253,7 +253,7 @@ static void __init dtb_ioapic_setup(void)
of_ioapic = 1;
return;
}
- printk(KERN_ERR "Error: No information about IO-APIC in OF.\n");
+ pr_err("Error: No information about IO-APIC in OF.\n");
}
#else
static void __init dtb_ioapic_setup(void) {}
diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
index f6dfd93..1e94e18 100644
--- a/arch/x86/kernel/doublefault.c
+++ b/arch/x86/kernel/doublefault.c
@@ -24,23 +24,24 @@ static void doublefault_fn(void)
native_store_gdt(&gdt_desc);
gdt = gdt_desc.address;
- printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
+ pr_emerg("PANIC: double fault, gdt at %08lx [%d bytes]\n",
+ gdt, gdt_desc.size);
if (ptr_ok(gdt)) {
gdt += GDT_ENTRY_TSS << 3;
tss = get_desc_base((struct desc_struct *)gdt);
- printk(KERN_EMERG "double fault, tss at %08lx\n", tss);
+ pr_emerg("double fault, tss at %08lx\n", tss);
if (ptr_ok(tss)) {
struct x86_hw_tss *t = (struct x86_hw_tss *)tss;
- printk(KERN_EMERG "eip = %08lx, esp = %08lx\n",
- t->ip, t->sp);
+ pr_emerg("eip = %08lx, esp = %08lx\n",
+ t->ip, t->sp);
- printk(KERN_EMERG "eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n",
- t->ax, t->bx, t->cx, t->dx);
- printk(KERN_EMERG "esi = %08lx, edi = %08lx\n",
- t->si, t->di);
+ pr_emerg("eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n",
+ t->ax, t->bx, t->cx, t->dx);
+ pr_emerg("esi = %08lx, edi = %08lx\n",
+ t->si, t->di);
}
}
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 32e5699..993706a 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -285,12 +285,12 @@ int __die(const char *str, struct pt_regs *regs, long err)
sp = kernel_stack_pointer(regs);
savesegment(ss, ss);
}
- printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
+ pr_emerg("EIP: [<%08lx>] ", regs->ip);
print_symbol("%s", regs->ip);
printk(" SS:ESP %04x:%08lx\n", ss, sp);
#else
/* Executive summary in case the oops scrolled away */
- printk(KERN_ALERT "RIP ");
+ pr_alert("RIP ");
printk_address(regs->ip);
printk(" RSP <%016lx>\n", regs->sp);
#endif
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 621b501..3cec9c2 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -114,7 +114,7 @@ static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
int x = e820x->nr_map;
if (x >= ARRAY_SIZE(e820x->map)) {
- printk(KERN_ERR "e820: too many entries; ignoring [mem %#010llx-%#010llx]\n",
+ pr_err("e820: too many entries; ignoring [mem %#010llx-%#010llx]\n",
(unsigned long long) start,
(unsigned long long) (start + size - 1));
return;
@@ -136,26 +136,26 @@ static void __init e820_print_type(u32 type)
switch (type) {
case E820_RAM:
case E820_RESERVED_KERN:
- printk(KERN_CONT "usable");
+ pr_cont("usable");
break;
case E820_RESERVED:
- printk(KERN_CONT "reserved");
+ pr_cont("reserved");
break;
case E820_ACPI:
- printk(KERN_CONT "ACPI data");
+ pr_cont("ACPI data");
break;
case E820_NVS:
- printk(KERN_CONT "ACPI NVS");
+ pr_cont("ACPI NVS");
break;
case E820_UNUSABLE:
- printk(KERN_CONT "unusable");
+ pr_cont("unusable");
break;
case E820_PMEM:
case E820_PRAM:
- printk(KERN_CONT "persistent (type %u)", type);
+ pr_cont("persistent (type %u)", type);
break;
default:
- printk(KERN_CONT "type %u", type);
+ pr_cont("type %u", type);
break;
}
}
@@ -165,12 +165,12 @@ void __init e820_print_map(char *who)
int i;
for (i = 0; i < e820.nr_map; i++) {
- printk(KERN_INFO "%s: [mem %#018Lx-%#018Lx] ", who,
+ pr_info("%s: [mem %#018Lx-%#018Lx] ", who,
(unsigned long long) e820.map[i].addr,
(unsigned long long)
(e820.map[i].addr + e820.map[i].size - 1));
e820_print_type(e820.map[i].type);
- printk(KERN_CONT "\n");
+ pr_cont("\n");
}
}
@@ -435,12 +435,12 @@ static u64 __init __e820_update_range(struct e820map *e820x, u64 start,
size = ULLONG_MAX - start;
end = start + size;
- printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx] ",
- (unsigned long long) start, (unsigned long long) (end - 1));
+ pr_debug("e820: update [mem %#010Lx-%#010Lx] ",
+ (unsigned long long) start, (unsigned long long) (end - 1));
e820_print_type(old_type);
- printk(KERN_CONT " ==> ");
+ pr_cont(" ==> ");
e820_print_type(new_type);
- printk(KERN_CONT "\n");
+ pr_cont("\n");
for (i = 0; i < e820x->nr_map; i++) {
struct e820entry *ei = &e820x->map[i];
@@ -515,11 +515,11 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
size = ULLONG_MAX - start;
end = start + size;
- printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx] ",
- (unsigned long long) start, (unsigned long long) (end - 1));
+ pr_debug("e820: remove [mem %#010Lx-%#010Lx] ",
+ (unsigned long long) start, (unsigned long long) (end - 1));
if (checktype)
e820_print_type(old_type);
- printk(KERN_CONT "\n");
+ pr_cont("\n");
for (i = 0; i < e820.nr_map; i++) {
struct e820entry *ei = &e820.map[i];
@@ -568,7 +568,7 @@ void __init update_e820(void)
{
if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map))
return;
- printk(KERN_INFO "e820: modified physical RAM map:\n");
+ pr_info("e820: modified physical RAM map:\n");
e820_print_map("modified");
}
static void __init update_e820_saved(void)
@@ -633,9 +633,9 @@ __init void e820_setup_gap(void)
#ifdef CONFIG_X86_64
if (!found) {
gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
- printk(KERN_ERR
- "e820: cannot find a gap in the 32bit address range\n"
- "e820: PCI devices with unassigned 32bit BARs may break!\n");
+ pr_err(
+ "e820: cannot find a gap in the 32bit address range\n"
+ "e820: PCI devices with unassigned 32bit BARs may break!\n");
}
#endif
@@ -644,9 +644,8 @@ __init void e820_setup_gap(void)
*/
pci_mem_start = gapstart;
- printk(KERN_INFO
- "e820: [mem %#010lx-%#010lx] available for PCI devices\n",
- gapstart, gapstart + gapsize - 1);
+ pr_info("e820: [mem %#010lx-%#010lx] available for PCI devices\n",
+ gapstart, gapstart + gapsize - 1);
}
/**
@@ -667,7 +666,7 @@ void __init parse_e820_ext(u64 phys_addr, u32 data_len)
__append_e820_map(extmap, entries);
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
early_memunmap(sdata, data_len);
- printk(KERN_INFO "e820: extended physical RAM map:\n");
+ pr_info("e820: extended physical RAM map:\n");
e820_print_map("extended");
}
@@ -734,7 +733,7 @@ u64 __init early_reserve_e820(u64 size, u64 align)
addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
if (addr) {
e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED);
- printk(KERN_INFO "e820: update e820_saved for early_reserve_e820\n");
+ pr_info("e820: update e820_saved for early_reserve_e820\n");
update_e820_saved();
}
@@ -788,8 +787,8 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn)
if (last_pfn > max_arch_pfn)
last_pfn = max_arch_pfn;
- printk(KERN_INFO "e820: last_pfn = %#lx max_arch_pfn = %#lx\n",
- last_pfn, max_arch_pfn);
+ pr_info("e820: last_pfn = %#lx max_arch_pfn = %#lx\n",
+ last_pfn, max_arch_pfn);
return last_pfn;
}
unsigned long __init e820_end_of_ram_pfn(void)
@@ -823,7 +822,7 @@ static int __init parse_memopt(char *p)
setup_clear_cpu_cap(X86_FEATURE_PSE);
return 0;
#else
- printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n");
+ pr_warn("mem=nopentium ignored! (only supported on x86_32)\n");
return -EINVAL;
#endif
}
@@ -907,7 +906,7 @@ void __init finish_e820_parsing(void)
&e820.nr_map) < 0)
early_panic("Invalid user supplied memory map");
- printk(KERN_INFO "e820: user-defined physical RAM map:\n");
+ pr_info("e820: user-defined physical RAM map:\n");
e820_print_map("user");
}
}
@@ -1073,9 +1072,8 @@ void __init e820_reserve_resources_late(void)
end = MAX_RESOURCE_SIZE;
if (start >= end)
continue;
- printk(KERN_DEBUG
- "e820: reserve RAM buffer [mem %#010llx-%#010llx]\n",
- start, end);
+ pr_debug("e820: reserve RAM buffer [mem %#010llx-%#010llx]\n",
+ start, end);
reserve_region_with_split(&iomem_resource, start, end,
"RAM buffer");
}
@@ -1125,7 +1123,7 @@ void __init setup_memory_map(void)
who = x86_init.resources.memory_setup();
memcpy(&e820_saved, &e820, sizeof(struct e820map));
- printk(KERN_INFO "e820: BIOS-provided physical RAM map:\n");
+ pr_info("e820: BIOS-provided physical RAM map:\n");
e820_print_map(who);
}
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index bca14c8..e843f15 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -33,13 +33,10 @@ static void __init fix_hypertransport_config(int num, int slot, int func)
*/
htcfg = read_pci_config(num, slot, func, 0x68);
if (htcfg & (1 << 18)) {
- printk(KERN_INFO "Detected use of extended apic ids "
- "on hypertransport bus\n");
+ pr_info("Detected use of extended apic ids on hypertransport bus\n");
if ((htcfg & (1 << 17)) == 0) {
- printk(KERN_INFO "Enabling hypertransport extended "
- "apic interrupt broadcast\n");
- printk(KERN_INFO "Note this is a bios bug, "
- "please contact your hw vendor\n");
+ pr_info("Enabling hypertransport extended apic interrupt broadcast\n");
+ pr_info("Note this is a bios bug, please contact your hw vendor\n");
htcfg |= (1 << 17);
write_pci_config(num, slot, func, 0x68, htcfg);
}
@@ -53,9 +50,7 @@ static void __init via_bugs(int num, int slot, int func)
#ifdef CONFIG_GART_IOMMU
if ((max_pfn > MAX_DMA32_PFN || force_iommu) &&
!gart_iommu_aperture_allowed) {
- printk(KERN_INFO
- "Looks like a VIA chipset. Disabling IOMMU."
- " Override with iommu=allowed\n");
+ pr_info("Looks like a VIA chipset. Disabling IOMMU. Override with iommu=allowed\n");
gart_iommu_aperture_disabled = 1;
}
#endif
@@ -87,11 +82,8 @@ static void __init nvidia_bugs(int num, int slot, int func)
if (acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check)) {
acpi_skip_timer_override = 1;
- printk(KERN_INFO "Nvidia board "
- "detected. Ignoring ACPI "
- "timer override.\n");
- printk(KERN_INFO "If you got timer trouble "
- "try acpi_use_timer_override\n");
+ pr_info("Nvidia board detected. Ignoring ACPI timer override.\n");
+ pr_info("If you got timer trouble try acpi_use_timer_override\n");
}
#endif
#endif
@@ -137,10 +129,9 @@ static void __init ati_bugs(int num, int slot, int func)
}
if (acpi_skip_timer_override) {
- printk(KERN_INFO "SB4X0 revision 0x%x\n", d);
- printk(KERN_INFO "Ignoring ACPI timer override.\n");
- printk(KERN_INFO "If you got timer trouble "
- "try acpi_use_timer_override\n");
+ pr_info("SB4X0 revision 0x%x\n", d);
+ pr_info("Ignoring ACPI timer override.\n");
+ pr_info("If you got timer trouble try acpi_use_timer_override\n");
}
}
@@ -179,10 +170,9 @@ static void __init ati_bugs_contd(int num, int slot, int func)
acpi_skip_timer_override = 1;
if (acpi_skip_timer_override) {
- printk(KERN_INFO "SB600 revision 0x%x\n", rev);
- printk(KERN_INFO "Ignoring ACPI timer override.\n");
- printk(KERN_INFO "If you got timer trouble "
- "try acpi_use_timer_override\n");
+ pr_info("SB600 revision 0x%x\n", rev);
+ pr_info("Ignoring ACPI timer override.\n");
+ pr_info("If you got timer trouble try acpi_use_timer_override\n");
}
}
#else
@@ -569,8 +559,8 @@ static void __init intel_graphics_stolen(int num, int slot, int func)
size = stolen_funcs->size(num, slot, func);
start = stolen_funcs->base(num, slot, func, size);
if (size && start) {
- printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n",
- start, start + (u32)size - 1);
+ pr_info("Reserving Intel graphics stolen memory at 0x%x-0x%x\n",
+ start, start + (u32)size - 1);
/* Mark this space as reserved */
e820_add_region(start, size, E820_RESERVED);
sanitize_e820_map(e820.map,
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 21bf924..be6dcb0 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -319,8 +319,8 @@ static struct console early_serial_console = {
static void early_console_register(struct console *con, int keep_early)
{
if (con->index != -1) {
- printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
- con->name);
+ pr_crit("ERROR: earlyprintk= %s already used\n",
+ con->name);
return;
}
early_console = con;
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 6d9f0a7..66d1b87 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -339,7 +339,8 @@ static void __init fpu__init_system_ctx_switch(void)
if (eagerfpu == ENABLE)
setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
- printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
+ pr_info("x86/fpu: Using '%s' FPU context switches.\n",
+ eagerfpu == ENABLE ? "eager" : "lazy");
}
/*
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index d425cda5..4fb56f9 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -222,7 +222,8 @@ static void __init setup_xstate_features(void)
"x86/fpu: misordered xstate at %d\n", last_good_offset);
last_good_offset = xstate_offsets[i];
- printk(KERN_INFO "x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n", i, ebx, i, eax);
+ pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
+ i, ebx, i, eax);
}
}
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index be0ebbb..c70b365 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -139,30 +139,30 @@ EXPORT_SYMBOL_GPL(is_hpet_enabled);
static void _hpet_print_config(const char *function, int line)
{
u32 i, timers, l, h;
- printk(KERN_INFO "hpet: %s(%d):\n", function, line);
+ pr_info("hpet: %s(%d):\n", function, line);
l = hpet_readl(HPET_ID);
h = hpet_readl(HPET_PERIOD);
timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
- printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
+ pr_info("hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
l = hpet_readl(HPET_CFG);
h = hpet_readl(HPET_STATUS);
- printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
+ pr_info("hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
l = hpet_readl(HPET_COUNTER);
h = hpet_readl(HPET_COUNTER+4);
- printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);
+ pr_info("hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);
for (i = 0; i < timers; i++) {
l = hpet_readl(HPET_Tn_CFG(i));
h = hpet_readl(HPET_Tn_CFG(i)+4);
- printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n",
+ pr_info("hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n",
i, l, h);
l = hpet_readl(HPET_Tn_CMP(i));
h = hpet_readl(HPET_Tn_CMP(i)+4);
- printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n",
+ pr_info("hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n",
i, l, h);
l = hpet_readl(HPET_Tn_ROUTE(i));
h = hpet_readl(HPET_Tn_ROUTE(i)+4);
- printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n",
+ pr_info("hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n",
i, l, h);
}
}
@@ -289,7 +289,7 @@ static void hpet_legacy_clockevent_register(void)
clockevents_config_and_register(&hpet_clockevent, hpet_freq,
HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
global_clock_event = &hpet_clockevent;
- printk(KERN_DEBUG "hpet clockevent registered\n");
+ pr_debug("hpet clockevent registered\n");
}
static int hpet_set_periodic(struct clock_event_device *evt, int timer)
@@ -526,8 +526,8 @@ static irqreturn_t hpet_interrupt_handler(int irq, void *data)
struct clock_event_device *hevt = &dev->evt;
if (!hevt->event_handler) {
- printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
- dev->num);
+ pr_info("Spurious HPET timer interrupt on HPET timer %d\n",
+ dev->num);
return IRQ_HANDLED;
}
@@ -547,7 +547,7 @@ static int hpet_setup_irq(struct hpet_dev *dev)
irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
enable_irq(dev->irq);
- printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
+ pr_debug("hpet: %s irq %d for MSI\n",
dev->name, dev->irq);
return 0;
@@ -646,7 +646,7 @@ static void hpet_msi_capability_lookup(unsigned int start_timer)
break;
}
- printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
+ pr_info("HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
num_timers, num_timers_used);
}
@@ -800,8 +800,7 @@ static int hpet_clocksource_register(void)
} while ((now - start) < 200000UL);
if (t1 == hpet_readl(HPET_COUNTER)) {
- printk(KERN_WARNING
- "HPET counter not counting. HPET disabled\n");
+ pr_warn("HPET counter not counting. HPET disabled\n");
return -ENODEV;
}
@@ -845,9 +844,7 @@ int __init hpet_enable(void)
*/
for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
if (i == 1000) {
- printk(KERN_WARNING
- "HPET config register value = 0xFFFFFFFF. "
- "Disabling HPET\n");
+ pr_warn("HPET config register value = 0xFFFFFFFF. Disabling HPET\n");
goto out_nohpet;
}
}
@@ -1231,7 +1228,7 @@ static void hpet_rtc_timer_reinit(void)
if (hpet_rtc_flags & RTC_PIE)
hpet_pie_count += lost_ints;
if (printk_ratelimit())
- printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
+ pr_warn("hpet1: lost %d rtc interrupts\n",
lost_ints);
}
}
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index be22f5a..c069219 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -204,8 +204,7 @@ spurious_8259A_irq:
* lets ACK and report it. [once per IRQ]
*/
if (!(spurious_irq_mask & irqmask)) {
- printk(KERN_DEBUG
- "spurious 8259A interrupt: IRQ%d.\n", irq);
+ pr_debug("spurious 8259A interrupt: IRQ%d.\n", irq);
spurious_irq_mask |= irqmask;
}
atomic_inc(&irq_err_count);
@@ -313,7 +312,7 @@ static int probe_8259A(void)
outb(probe_val, PIC_MASTER_IMR);
new_val = inb(PIC_MASTER_IMR);
if (new_val != probe_val) {
- printk(KERN_INFO "Using NULL legacy PIC\n");
+ pr_info("Using NULL legacy PIC\n");
legacy_pic = &null_legacy_pic;
}
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 38da8f2..3b3b230 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -38,7 +38,7 @@ static int check_stack_overflow(void)
static void print_stack_overflow(void)
{
- printk(KERN_WARNING "low stack detected by irq handler\n");
+ pr_warn("low stack detected by irq handler\n");
dump_stack();
if (sysctl_panic_on_stackoverflow)
panic("low stack detected by irq handler - check messages\n");
@@ -124,8 +124,8 @@ void irq_ctx_init(int cpu)
THREAD_SIZE_ORDER));
per_cpu(softirq_stack, cpu) = irqstk;
- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
+ pr_debug("CPU %u irqstacks, hard=%p soft=%p\n",
+ cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
}
void do_softirq_own_stack(void)
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index e565e0e..c4c705b 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -31,8 +31,8 @@ static void bug_at(unsigned char *ip, int line)
* Something went wrong. Crash the box, as something could be
* corrupting the kernel.
*/
- pr_warning("Unexpected op at %pS [%p] (%02x %02x %02x %02x %02x) %s:%d\n",
- ip, ip, ip[0], ip[1], ip[2], ip[3], ip[4], __FILE__, line);
+ pr_warn("Unexpected op at %pS [%p] (%02x %02x %02x %02x %02x) %s:%d\n",
+ ip, ip, ip[0], ip[1], ip[2], ip[3], ip[4], __FILE__, line);
BUG();
}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 44256a6..d13f181 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -300,7 +300,7 @@ kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
return -1;
if (hw_break_release_slot(i)) {
- printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr);
+ pr_err("Cannot remove hw breakpoint at %lx\n", addr);
return -1;
}
breakinfo[i].enabled = 0;
@@ -327,7 +327,7 @@ static void kgdb_remove_all_hw_break(void)
early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
breakinfo[i].type);
else if (hw_break_release_slot(i))
- printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n",
+ pr_err("KGDB: hw bpt remove failed %lx\n",
breakinfo[i].addr);
breakinfo[i].enabled = 0;
}
@@ -498,7 +498,7 @@ single_step_cont(struct pt_regs *regs, struct die_args *args)
* Single step exception from kernel space to user space so
* eat the exception and continue the process:
*/
- printk(KERN_ERR "KGDB: trap/step from kernel to user space, "
+ pr_err("KGDB: trap/step from kernel to user space, "
"resuming...\n");
kgdb_arch_handle_exception(args->trapnr, args->signr,
args->err, "c", "", regs);
@@ -675,7 +675,7 @@ void kgdb_arch_late(void)
continue;
breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
if (IS_ERR((void * __force)breakinfo[i].pev)) {
- printk(KERN_ERR "kgdb: Could not allocate hw"
+ pr_err("kgdb: Could not allocate hw"
"breakpoints\nDisabling the kernel debugger\n");
breakinfo[i].pev = NULL;
kgdb_arch_exit();
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 1deffe6..7d738d6 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -581,8 +581,8 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
* Raise a BUG or we'll continue in an endless reentering loop
* and eventually a stack overflow.
*/
- printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
- p->addr);
+ pr_warn("Unrecoverable kprobe detected at %p.\n",
+ p->addr);
dump_kprobe(p);
BUG();
default:
@@ -1092,12 +1092,11 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
(addr < (u8 *) jprobe_return_end)) {
if (stack_addr(regs) != saved_sp) {
struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
- printk(KERN_ERR
- "current sp %p does not match saved sp %p\n",
+ pr_err("current sp %p does not match saved sp %p\n",
stack_addr(regs), saved_sp);
- printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
+ pr_err("Saved registers for jprobe %p\n", jp);
show_regs(saved_regs);
- printk(KERN_ERR "Current registers\n");
+ pr_err("Current registers\n");
show_regs(regs);
BUG();
}
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 47190bd..1cc3176 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -344,8 +344,8 @@ static void kvm_guest_cpu_init(void)
#endif
wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
__this_cpu_write(apf_reason.enabled, 1);
- printk(KERN_INFO"KVM setup async PF for cpu %d\n",
- smp_processor_id());
+ pr_info("KVM setup async PF for cpu %d\n",
+ smp_processor_id());
}
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
@@ -370,8 +370,8 @@ static void kvm_pv_disable_apf(void)
wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
__this_cpu_write(apf_reason.enabled, 0);
- printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
- smp_processor_id());
+ pr_info("Unregister pv shared memory for cpu %d\n",
+ smp_processor_id());
}
static void kvm_pv_guest_cpu_reboot(void *unused)
@@ -692,7 +692,7 @@ static struct dentry *kvm_init_debugfs(void)
{
d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
if (!d_kvm_debug)
- printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");
+ pr_warn("Could not create 'kvm' debugfs directory\n");
return d_kvm_debug;
}
@@ -883,7 +883,7 @@ static __init int kvm_spinlock_init_jump(void)
return 0;
static_key_slow_inc(&paravirt_ticketlocks_enabled);
- printk(KERN_INFO "KVM setup paravirtual spinlock\n");
+ pr_info("KVM setup paravirtual spinlock\n");
return 0;
}
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 72cef58..553f9bd 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -114,8 +114,8 @@ static inline void kvm_sched_clock_init(bool stable)
pv_time_ops.sched_clock = kvm_sched_clock_read;
set_sched_clock_stable();
- printk(KERN_INFO "kvm-clock: using sched offset of %llu cycles\n",
- kvm_sched_clock_offset);
+ pr_info("kvm-clock: using sched offset of %llu cycles\n",
+ kvm_sched_clock_offset);
BUILD_BUG_ON(sizeof(kvm_sched_clock_offset) >
sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
@@ -195,8 +195,8 @@ int kvm_register_clock(char *txt)
low = (int)slow_virt_to_phys(src) | 1;
high = ((u64)slow_virt_to_phys(src) >> 32);
ret = native_write_msr_safe(msr_kvm_system_time, low, high);
- printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
- cpu, high, low, txt);
+ pr_info("kvm-clock: cpu %d, msr %x:%x, %s\n",
+ cpu, high, low, txt);
return ret;
}
@@ -263,7 +263,7 @@ void __init kvmclock_init(void)
} else if (!(kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)))
return;
- printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
+ pr_info("kvm-clock: Using msrs %x and %x",
msr_kvm_system_time, msr_kvm_wall_clock);
mem = memblock_alloc(size, PAGE_SIZE);
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index f4c886d..1e49af9 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -205,7 +205,7 @@ void fam10h_check_enable_mmcfg(void)
return;
}
- printk(KERN_INFO "Enable MMCONFIG on AMD Family 10h\n");
+ pr_info("Enable MMCONFIG on AMD Family 10h\n");
val &= ~((FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT) |
(FAM10H_MMIO_CONF_BUSRANGE_MASK<<FAM10H_MMIO_CONF_BUSRANGE_SHIFT));
val |= fam10h_pci_mmconf_base | (8 << FAM10H_MMIO_CONF_BUSRANGE_SHIFT) |
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 005c03e..3edc48d 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -37,12 +37,12 @@
#if 0
#define DEBUGP(fmt, ...) \
- printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+ pr_debug(fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...) \
do { \
if (0) \
- printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
+ pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
#endif
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
index 6d9582e..14a678d 100644
--- a/arch/x86/kernel/nmi_selftest.c
+++ b/arch/x86/kernel/nmi_selftest.c
@@ -119,15 +119,15 @@ static void __init dotest(void (*testcase_fn)(void), int expected)
unexpected_testcase_failures++;
if (nmi_fail == FAILURE)
- printk(KERN_CONT "FAILED |");
+ pr_cont("FAILED |");
else if (nmi_fail == TIMEOUT)
- printk(KERN_CONT "TIMEOUT|");
+ pr_cont("TIMEOUT|");
else
- printk(KERN_CONT "ERROR |");
+ pr_cont("ERROR |");
dump_stack();
} else {
testcase_successes++;
- printk(KERN_CONT " ok |");
+ pr_cont(" ok |");
}
testcase_total++;
@@ -152,10 +152,10 @@ void __init nmi_selftest(void)
print_testname("remote IPI");
dotest(remote_ipi, SUCCESS);
- printk(KERN_CONT "\n");
+ pr_cont("\n");
print_testname("local IPI");
dotest(local_ipi, SUCCESS);
- printk(KERN_CONT "\n");
+ pr_cont("\n");
cleanup_nmi_testsuite();
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index f08ac28..938fff9 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -67,7 +67,7 @@ u64 _paravirt_ident_64(u64 x)
void __init default_banner(void)
{
- printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
+ pr_info("Booting paravirtualized kernel on %s\n",
pv_info.name);
}
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 833b1d3..79a95be 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -586,19 +586,19 @@ static void calioc2_tce_cache_blast(struct iommu_table *tbl)
unsigned char bus = tbl->it_busno;
begin:
- printk(KERN_DEBUG "Calgary: CalIOC2 bus 0x%x entering tce cache blast "
- "sequence - count %d\n", bus, count);
+ pr_debug("Calgary: CalIOC2 bus 0x%x entering tce cache blast sequence - count %d\n",
+ bus, count);
/* 1. using the Page Migration Control reg set SoftStop */
target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
val = be32_to_cpu(readl(target));
- printk(KERN_DEBUG "1a. read 0x%x [LE] from %p\n", val, target);
+ pr_debug("1a. read 0x%x [LE] from %p\n", val, target);
val |= PMR_SOFTSTOP;
- printk(KERN_DEBUG "1b. writing 0x%x [LE] to %p\n", val, target);
+ pr_debug("1b. writing 0x%x [LE] to %p\n", val, target);
writel(cpu_to_be32(val), target);
/* 2. poll split queues until all DMA activity is done */
- printk(KERN_DEBUG "2a. starting to poll split queues\n");
+ pr_debug("2a. starting to poll split queues\n");
target = calgary_reg(bbar, split_queue_offset(bus));
do {
val64 = readq(target);
@@ -610,7 +610,7 @@ begin:
/* 3. poll Page Migration DEBUG for SoftStopFault */
target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG);
val = be32_to_cpu(readl(target));
- printk(KERN_DEBUG "3. read 0x%x [LE] from %p\n", val, target);
+ pr_debug("3. read 0x%x [LE] from %p\n", val, target);
/* 4. if SoftStopFault - goto (1) */
if (val & PMR_SOFTSTOPFAULT) {
@@ -624,32 +624,32 @@ begin:
/* 5. Slam into HardStop by reading PHB_PAGE_MIG_CTRL */
target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
- printk(KERN_DEBUG "5a. slamming into HardStop by reading %p\n", target);
+ pr_debug("5a. slamming into HardStop by reading %p\n", target);
val = be32_to_cpu(readl(target));
- printk(KERN_DEBUG "5b. read 0x%x [LE] from %p\n", val, target);
+ pr_debug("5b. read 0x%x [LE] from %p\n", val, target);
target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG);
val = be32_to_cpu(readl(target));
- printk(KERN_DEBUG "5c. read 0x%x [LE] from %p (debug)\n", val, target);
+ pr_debug("5c. read 0x%x [LE] from %p (debug)\n", val, target);
/* 6. invalidate TCE cache */
- printk(KERN_DEBUG "6. invalidating TCE cache\n");
+ pr_debug("6. invalidating TCE cache\n");
target = calgary_reg(bbar, tar_offset(bus));
writeq(tbl->tar_val, target);
/* 7. Re-read PMCR */
- printk(KERN_DEBUG "7a. Re-reading PMCR\n");
+ pr_debug("7a. Re-reading PMCR\n");
target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
val = be32_to_cpu(readl(target));
- printk(KERN_DEBUG "7b. read 0x%x [LE] from %p\n", val, target);
+ pr_debug("7b. read 0x%x [LE] from %p\n", val, target);
/* 8. Remove HardStop */
- printk(KERN_DEBUG "8a. removing HardStop from PMCR\n");
+ pr_debug("8a. removing HardStop from PMCR\n");
target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
val = 0;
- printk(KERN_DEBUG "8b. writing 0x%x [LE] to %p\n", val, target);
+ pr_debug("8b. writing 0x%x [LE] to %p\n", val, target);
writel(cpu_to_be32(val), target);
val = be32_to_cpu(readl(target));
- printk(KERN_DEBUG "8c. read 0x%x [LE] from %p\n", val, target);
+ pr_debug("8c. read 0x%x [LE] from %p\n", val, target);
}
static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start,
@@ -885,8 +885,8 @@ static void calioc2_dump_error_regs(struct iommu_table *tbl)
/* root complex status */
target = calgary_reg(bbar, phboff | PHB_ROOT_COMPLEX_STATUS);
rcstat = be32_to_cpu(readl(target));
- printk(KERN_EMERG "Calgary: 0x%08x@...x\n", rcstat,
- PHB_ROOT_COMPLEX_STATUS);
+ pr_emerg("Calgary: 0x%08x@...x\n", rcstat,
+ PHB_ROOT_COMPLEX_STATUS);
}
static void calgary_watchdog(unsigned long data)
@@ -998,11 +998,10 @@ static void __init calgary_enable_translation(struct pci_dev *dev)
val32 = be32_to_cpu(readl(target));
val32 |= PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE;
- printk(KERN_INFO "Calgary: enabling translation on %s PHB %#x\n",
- (dev->device == PCI_DEVICE_ID_IBM_CALGARY) ?
- "Calgary" : "CalIOC2", busnum);
- printk(KERN_INFO "Calgary: errant DMAs will now be prevented on this "
- "bus.\n");
+ pr_info("Calgary: enabling translation on %s PHB %#x\n",
+ (dev->device == PCI_DEVICE_ID_IBM_CALGARY) ?
+ "Calgary" : "CalIOC2", busnum);
+ pr_info("Calgary: errant DMAs will now be prevented on this bus.\n");
writel(cpu_to_be32(val32), target);
readl(target); /* flush */
@@ -1030,7 +1029,7 @@ static void __init calgary_disable_translation(struct pci_dev *dev)
val32 = be32_to_cpu(readl(target));
val32 &= ~(PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE);
- printk(KERN_INFO "Calgary: disabling translation on PHB %#x!\n", busnum);
+ pr_info("Calgary: disabling translation on PHB %#x!\n", busnum);
writel(cpu_to_be32(val32), target);
readl(target); /* flush */
@@ -1064,8 +1063,8 @@ static int __init calgary_init_one(struct pci_dev *dev)
if (dev->bus->parent) {
if (dev->bus->parent->self)
- printk(KERN_WARNING "Calgary: IEEEE, dev %p has "
- "bus->parent->self!\n", dev);
+ pr_warn("Calgary: IEEEE, dev %p has bus->parent->self!\n",
+ dev);
dev->bus->parent->self = dev;
} else
dev->bus->self = dev;
@@ -1244,9 +1243,7 @@ static int __init build_detail_arrays(void)
numnodes = rio_table_hdr->num_scal_dev;
if (numnodes > MAX_NUMNODES){
- printk(KERN_WARNING
- "Calgary: MAX_NUMNODES too low! Defined as %d, "
- "but system has %d nodes.\n",
+ pr_warn("Calgary: MAX_NUMNODES too low! Defined as %d, but system has %d nodes.\n",
MAX_NUMNODES, numnodes);
return -ENODEV;
}
@@ -1261,9 +1258,8 @@ static int __init build_detail_arrays(void)
rio_detail_size = 15;
break;
default:
- printk(KERN_WARNING
- "Calgary: Invalid Rio Grande Table Version: %d\n",
- rio_table_hdr->version);
+ pr_warn("Calgary: Invalid Rio Grande Table Version: %d\n",
+ rio_table_hdr->version);
return -EPROTO;
}
@@ -1359,12 +1355,12 @@ static int __init calgary_iommu_init(void)
int ret;
/* ok, we're trying to use Calgary - let's roll */
- printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");
+ pr_info("PCI-DMA: Using Calgary IOMMU\n");
ret = calgary_init();
if (ret) {
- printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
- "falling back to no_iommu\n", ret);
+ pr_err("PCI-DMA: Calgary init failed %d, falling back to no_iommu\n",
+ ret);
return ret;
}
@@ -1393,7 +1389,7 @@ int __init detect_calgary(void)
if (!early_pci_allowed())
return -ENODEV;
- printk(KERN_DEBUG "Calgary: detecting Calgary via BIOS EBDA area\n");
+ pr_debug("Calgary: detecting Calgary via BIOS EBDA area\n");
ptr = (unsigned long)phys_to_virt(get_bios_ebda());
@@ -1415,14 +1411,13 @@ int __init detect_calgary(void)
offset = *((unsigned short *)(ptr + offset));
}
if (!rio_table_hdr) {
- printk(KERN_DEBUG "Calgary: Unable to locate Rio Grande table "
- "in EBDA - bailing!\n");
+ pr_debug("Calgary: Unable to locate Rio Grande table in EBDA - bailing!\n");
return -ENODEV;
}
ret = build_detail_arrays();
if (ret) {
- printk(KERN_DEBUG "Calgary: build_detail_arrays ret %d\n", ret);
+ pr_debug("Calgary: build_detail_arrays ret %d\n", ret);
return -ENOMEM;
}
@@ -1458,15 +1453,15 @@ int __init detect_calgary(void)
}
}
- printk(KERN_DEBUG "Calgary: finished detection, Calgary %s\n",
- calgary_found ? "found" : "not found");
+ pr_debug("Calgary: finished detection, Calgary %s\n",
+ calgary_found ? "found" : "not found");
if (calgary_found) {
iommu_detected = 1;
calgary_detected = 1;
- printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected.\n");
- printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d\n",
- specified_table_size);
+ pr_info("PCI-DMA: Calgary IOMMU detected.\n");
+ pr_info("PCI-DMA: Calgary TCE table spec is %d\n",
+ specified_table_size);
x86_init.iommu.iommu_init = calgary_iommu_init;
}
@@ -1524,8 +1519,8 @@ static int __init calgary_parse_options(char *p)
bridge = val;
if (bridge < MAX_PHB_BUS_NUM) {
- printk(KERN_INFO "Calgary: disabling "
- "translation for PHB %#x\n", bridge);
+ pr_info("Calgary: disabling translation for PHB %#x\n",
+ bridge);
bus_info[bridge].translation_disabled = 1;
}
}
@@ -1575,7 +1570,7 @@ static int __init calgary_fixup_tce_spaces(void)
if (no_iommu || swiotlb || !calgary_detected)
return -ENODEV;
- printk(KERN_DEBUG "Calgary: fixing up tce spaces\n");
+ pr_debug("Calgary: fixing up tce spaces\n");
do {
dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
index 35ccf75..79149db 100644
--- a/arch/x86/kernel/pci-iommu_table.c
+++ b/arch/x86/kernel/pci-iommu_table.c
@@ -56,7 +56,7 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
q = find_dependents_of(start, finish, p);
x = find_dependents_of(start, finish, q);
if (p == x) {
- printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n",
+ pr_err("CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n",
p->detect, q->detect);
/* Heavy handed way..*/
x->depend = 0;
@@ -66,7 +66,7 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
for (p = start; p < finish; p++) {
q = find_dependents_of(p, finish, p);
if (q && q > p) {
- printk(KERN_ERR "EXECUTION ORDER INVALID! %pS should be called before %pS!\n",
+ pr_err("EXECUTION ORDER INVALID! %pS should be called before %pS!\n",
p->detect, q->detect);
}
}
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index da15918..4c365495 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -16,8 +16,7 @@ check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
if (hwdev && !dma_capable(hwdev, bus, size)) {
if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
- printk(KERN_ERR
- "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
+ pr_err("nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
name, (long long)bus, size,
(long long)*hwdev->dma_mask);
return 0;
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 7c577a1..2cad97b 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -112,8 +112,7 @@ void __init pci_swiotlb_late_init(void)
if (!swiotlb)
swiotlb_free();
else {
- printk(KERN_INFO "PCI-DMA: "
- "Using software bounce buffering for IO (SWIOTLB)\n");
+ pr_info("PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
swiotlb_print_info();
}
}
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index cc457ff..f3e2642 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -86,7 +86,7 @@ static void ich_force_hpet_resume(void)
if (!(val & 0x80))
BUG();
else
- printk(KERN_DEBUG "Force enabled HPET at resume\n");
+ pr_debug("Force enabled HPET at resume\n");
return;
}
@@ -103,7 +103,7 @@ static void ich_force_enable_hpet(struct pci_dev *dev)
pci_read_config_dword(dev, 0xF0, &rcba);
rcba &= 0xFFFFC000;
if (rcba == 0) {
- dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
+ dev_dbg(&dev->dev, "RCBA disabled; "
"cannot force enable HPET\n");
return;
}
@@ -111,7 +111,7 @@ static void ich_force_enable_hpet(struct pci_dev *dev)
/* use bits 31:14, 16 kB aligned */
rcba_base = ioremap_nocache(rcba, 0x4000);
if (rcba_base == NULL) {
- dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
+ dev_dbg(&dev->dev, "ioremap failed; "
"cannot force enable HPET\n");
return;
}
@@ -123,7 +123,7 @@ static void ich_force_enable_hpet(struct pci_dev *dev)
/* HPET is enabled in HPTC. Just not reported by BIOS */
val = val & 0x3;
force_hpet_address = 0xFED00000 | (val << 12);
- dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
+ dev_dbg(&dev->dev, "Force enabled HPET at "
"0x%lx\n", force_hpet_address);
iounmap(rcba_base);
return;
@@ -143,12 +143,11 @@ static void ich_force_enable_hpet(struct pci_dev *dev)
if (err) {
force_hpet_address = 0;
iounmap(rcba_base);
- dev_printk(KERN_DEBUG, &dev->dev,
- "Failed to force enable HPET\n");
+ dev_dbg(&dev->dev, "Failed to force enable HPET\n");
} else {
force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
- dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
- "0x%lx\n", force_hpet_address);
+ dev_dbg(&dev->dev, "Force enabled HPET at 0x%lx\n",
+ force_hpet_address);
}
}
@@ -177,7 +176,7 @@ static struct pci_dev *cached_dev;
static void hpet_print_force_info(void)
{
- printk(KERN_INFO "HPET not enabled in BIOS. "
+ pr_info("HPET not enabled in BIOS. "
"You might try hpet=force boot option\n");
}
@@ -198,7 +197,7 @@ static void old_ich_force_hpet_resume(void)
val = gen_cntl >> 15;
val &= 0x7;
if (val == 0x4)
- printk(KERN_DEBUG "Force enabled HPET at resume\n");
+ pr_debug("Force enabled HPET at resume\n");
else
BUG();
}
@@ -221,8 +220,7 @@ static void old_ich_force_enable_hpet(struct pci_dev *dev)
if (val & 0x4) {
val &= 0x3;
force_hpet_address = 0xFED00000 | (val << 12);
- dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
- force_hpet_address);
+ dev_dbg(&dev->dev, "HPET at 0x%lx\n", force_hpet_address);
return;
}
@@ -242,14 +240,14 @@ static void old_ich_force_enable_hpet(struct pci_dev *dev)
/* HPET is enabled in HPTC. Just not reported by BIOS */
val &= 0x3;
force_hpet_address = 0xFED00000 | (val << 12);
- dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
- "0x%lx\n", force_hpet_address);
+ dev_dbg(&dev->dev, "Force enabled HPET at 0x%lx\n",
+ force_hpet_address);
cached_dev = dev;
force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
return;
}
- dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
+ dev_dbg(&dev->dev, "Failed to force enable HPET\n");
}
/*
@@ -290,7 +288,7 @@ static void vt8237_force_hpet_resume(void)
pci_read_config_dword(cached_dev, 0x68, &val);
if (val & 0x80)
- printk(KERN_DEBUG "Force enabled HPET at resume\n");
+ pr_debug("Force enabled HPET at resume\n");
else
BUG();
}
@@ -314,7 +312,7 @@ static void vt8237_force_enable_hpet(struct pci_dev *dev)
*/
if (val & 0x80) {
force_hpet_address = (val & ~0x3ff);
- dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
+ dev_dbg(&dev->dev, "HPET at 0x%lx\n",
force_hpet_address);
return;
}
@@ -329,14 +327,14 @@ static void vt8237_force_enable_hpet(struct pci_dev *dev)
pci_read_config_dword(dev, 0x68, &val);
if (val & 0x80) {
force_hpet_address = (val & ~0x3ff);
- dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
+ dev_dbg(&dev->dev, "Force enabled HPET at "
"0x%lx\n", force_hpet_address);
cached_dev = dev;
force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
return;
}
- dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
+ dev_dbg(&dev->dev, "Failed to force enable HPET\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
@@ -349,7 +347,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
static void ati_force_hpet_resume(void)
{
pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
- printk(KERN_DEBUG "Force enabled HPET at resume\n");
+ pr_debug("Force enabled HPET at resume\n");
}
static u32 ati_ixp4x0_rev(struct pci_dev *dev)
@@ -366,7 +364,7 @@ static u32 ati_ixp4x0_rev(struct pci_dev *dev)
err |= pci_write_config_dword(dev, 0x70, d);
err |= pci_read_config_dword(dev, 0x8, &d);
d &= 0xff;
- dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);
+ dev_dbg(&dev->dev, "SB4X0 revision 0x%x\n", d);
WARN_ON_ONCE(err);
@@ -410,7 +408,7 @@ static void ati_force_enable_hpet(struct pci_dev *dev)
force_hpet_address = val;
force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
- dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
+ dev_dbg(&dev->dev, "Force enabled HPET at 0x%lx\n",
force_hpet_address);
cached_dev = dev;
}
@@ -423,7 +421,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
static void nvidia_force_hpet_resume(void)
{
pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
- printk(KERN_DEBUG "Force enabled HPET at resume\n");
+ pr_debug("Force enabled HPET at resume\n");
}
static void nvidia_force_enable_hpet(struct pci_dev *dev)
@@ -442,7 +440,7 @@ static void nvidia_force_enable_hpet(struct pci_dev *dev)
pci_read_config_dword(dev, 0x44, &val);
force_hpet_address = val & 0xfffffffe;
force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
- dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
+ dev_dbg(&dev->dev, "Force enabled HPET at 0x%lx\n",
force_hpet_address);
cached_dev = dev;
return;
@@ -508,7 +506,7 @@ static void e6xx_force_enable_hpet(struct pci_dev *dev)
force_hpet_address = 0xFED00000;
force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
- dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
+ dev_dbg(&dev->dev, "Force enabled HPET at "
"0x%lx\n", force_hpet_address);
return;
}
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 4af8d06..0ff3fbc 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -48,11 +48,10 @@ int mach_set_rtc_mmss(const struct timespec *now)
if (!rtc_valid_tm(&tm)) {
retval = set_rtc_time(&tm);
if (retval)
- printk(KERN_ERR "%s: RTC write failed with error %d\n",
- __func__, retval);
+ pr_err("%s: RTC write failed with error %d\n",
+ __func__, retval);
} else {
- printk(KERN_ERR
- "%s: Invalid RTC value: write of %lx to RTC failed\n",
+ pr_err("%s: Invalid RTC value: write of %lx to RTC failed\n",
__func__, nowtime);
retval = -EINVAL;
}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index aa52c10..c0d783d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -338,12 +338,12 @@ static void __init relocate_initrd(void)
memblock_reserve(relocated_ramdisk, area_size);
initrd_start = relocated_ramdisk + PAGE_OFFSET;
initrd_end = initrd_start + ramdisk_size;
- printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
+ pr_info("Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);
- printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
+ pr_info("Move RAMDISK from [mem %#010llx-%#010llx] to"
" [mem %#010llx-%#010llx]\n",
ramdisk_image, ramdisk_image + ramdisk_size - 1,
relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
@@ -382,7 +382,7 @@ static void __init reserve_initrd(void)
"disabling initrd (%lld needed, %lld available)\n",
ramdisk_size, mapped_size>>1);
- printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
+ pr_info("RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
ramdisk_end - 1);
if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
@@ -457,7 +457,7 @@ static void __init e820_reserve_setup_data(void)
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
memcpy(&e820_saved, &e820, sizeof(struct e820map));
- printk(KERN_INFO "extended physical RAM map:\n");
+ pr_info("extended physical RAM map:\n");
e820_print_map("reserve setup_data");
}
@@ -713,7 +713,7 @@ static void __init trim_snb_memory(void)
if (!snb_gfx_workaround_needed())
return;
- printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
+ pr_debug("reserving inaccessible SNB gfx pages\n");
/*
* Reserve all memory below the 1 MB mark that has not
@@ -723,8 +723,8 @@ static void __init trim_snb_memory(void)
for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
if (memblock_reserve(bad_pages[i], PAGE_SIZE))
- printk(KERN_WARNING "failed to reserve 0x%08lx\n",
- bad_pages[i]);
+ pr_warn("failed to reserve 0x%08lx\n",
+ bad_pages[i]);
}
}
@@ -881,7 +881,7 @@ void __init setup_arch(char **cmdline_p)
*/
__flush_tlb_all();
#else
- printk(KERN_INFO "Command line: %s\n", boot_command_line);
+ pr_info("Command line: %s\n", boot_command_line);
#endif
/*
@@ -1030,7 +1030,7 @@ void __init setup_arch(char **cmdline_p)
e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
E820_RESERVED);
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- printk(KERN_INFO "fixed physical RAM map:\n");
+ pr_info("fixed physical RAM map:\n");
e820_print_map("bad_ppro");
}
#else
@@ -1107,8 +1107,8 @@ void __init setup_arch(char **cmdline_p)
#endif
#ifdef CONFIG_X86_32
- printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
- (max_pfn_mapped<<PAGE_SHIFT) - 1);
+ pr_debug("initial memory mapped: [mem 0x00000000-%#010lx]\n",
+ (max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif
reserve_real_mode();
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index e4fcb87..ce3527b 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -206,7 +206,7 @@ void __init setup_per_cpu_areas(void)
pcpu_cpu_distance,
pcpu_fc_alloc, pcpu_fc_free);
if (rc < 0)
- pr_warning("%s allocator failed (%d), falling back to page size\n",
+ pr_warn("%s allocator failed (%d), falling back to page size\n",
pcpu_fc_names[pcpu_chosen_fc], rc);
}
if (rc < 0)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 24d57f7..f0907e4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -692,7 +692,7 @@ void smp_announce(void)
{
int num_nodes = num_online_nodes();
- printk(KERN_INFO "x86: Booted up %d node%s, %d CPUs\n",
+ pr_info("x86: Booted up %d node%s, %d CPUs\n",
num_nodes, (num_nodes > 1 ? "s" : ""), num_online_cpus());
}
@@ -710,7 +710,7 @@ static void announce_cpu(int cpu, int apicid)
node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */
if (cpu == 1)
- printk(KERN_INFO "x86: Booting SMP configuration:\n");
+ pr_info("x86: Booting SMP configuration:\n");
if (system_state == SYSTEM_BOOTING) {
if (node != current_node) {
@@ -718,7 +718,7 @@ static void announce_cpu(int cpu, int apicid)
pr_cont("\n");
current_node = node;
- printk(KERN_INFO ".... node %*s#%d, CPUs: ",
+ pr_info(".... node %*s#%d, CPUs: ",
node_width - num_digits(node), " ", node);
}
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
index b285d4e..c659998 100644
--- a/arch/x86/kernel/sysfb_efi.c
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -142,10 +142,9 @@ static int __init efifb_set_system(const struct dmi_system_id *id)
return 0;
}
- printk(KERN_INFO "efifb: dmi detected %s - framebuffer at 0x%08x "
- "(%dx%d, stride %d)\n", id->ident,
- screen_info.lfb_base, screen_info.lfb_width,
- screen_info.lfb_height, screen_info.lfb_linelength);
+ pr_info("efifb: dmi detected %s - framebuffer at 0x%08x (%dx%d, stride %d)\n",
+ id->ident, screen_info.lfb_base, screen_info.lfb_width,
+ screen_info.lfb_height, screen_info.lfb_linelength);
return 1;
}
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
index 764a29f..c5b64cd 100644
--- a/arch/x86/kernel/sysfb_simplefb.c
+++ b/arch/x86/kernel/sysfb_simplefb.c
@@ -73,7 +73,7 @@ __init int create_simplefb(const struct screen_info *si,
len = mode->height * mode->stride;
len = PAGE_ALIGN(len);
if (len > (u64)si->lfb_size << 16) {
- printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
+ pr_warn("sysfb: VRAM smaller than advertised\n");
return -EINVAL;
}
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 91a4496..27b0452 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -70,13 +70,13 @@ void __init tboot_probe(void)
*/
if (!e820_any_mapped(boot_params.tboot_addr,
boot_params.tboot_addr, E820_RESERVED)) {
- pr_warning("non-0 tboot_addr but it is not of type E820_RESERVED\n");
+ pr_warn("non-0 tboot_addr but it is not of type E820_RESERVED\n");
return;
}
/* only a natively booted kernel should be using TXT */
if (paravirt_enabled()) {
- pr_warning("non-0 tboot_addr but pv_ops is enabled\n");
+ pr_warn("non-0 tboot_addr but pv_ops is enabled\n");
return;
}
@@ -84,13 +84,13 @@ void __init tboot_probe(void)
set_fixmap(FIX_TBOOT_BASE, boot_params.tboot_addr);
tboot = (struct tboot *)fix_to_virt(FIX_TBOOT_BASE);
if (memcmp(&tboot_uuid, &tboot->uuid, sizeof(tboot->uuid))) {
- pr_warning("tboot at 0x%llx is invalid\n",
+ pr_warn("tboot at 0x%llx is invalid\n",
boot_params.tboot_addr);
tboot = NULL;
return;
}
if (tboot->version < 5) {
- pr_warning("tboot version is invalid: %u\n", tboot->version);
+ pr_warn("tboot version is invalid: %u\n", tboot->version);
tboot = NULL;
return;
}
@@ -293,7 +293,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
if (sleep_state >= ACPI_S_STATE_COUNT ||
acpi_shutdown_map[sleep_state] == -1) {
- pr_warning("unsupported sleep state 0x%x\n", sleep_state);
+ pr_warn("unsupported sleep state 0x%x\n", sleep_state);
return -1;
}
@@ -306,7 +306,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
if (!tboot_enabled())
return 0;
- pr_warning("tboot is not able to suspend on platforms with reduced hardware sleep (ACPIv5)");
+ pr_warn("tboot is not able to suspend on platforms with reduced hardware sleep (ACPIv5)");
return -ENODEV;
}
@@ -324,7 +324,7 @@ static int tboot_wait_for_aps(int num_aps)
}
if (timeout)
- pr_warning("tboot wait for APs timeout\n");
+ pr_warn("tboot wait for APs timeout\n");
return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps);
}
@@ -526,7 +526,7 @@ int tboot_force_iommu(void)
return 0;
if (no_iommu || swiotlb || dmar_disabled)
- pr_warning("Forcing Intel-IOMMU to enabled\n");
+ pr_warn("Forcing Intel-IOMMU to enabled\n");
dmar_disabled = 0;
#ifdef CONFIG_SWIOTLB
diff --git a/arch/x86/kernel/tce_64.c b/arch/x86/kernel/tce_64.c
index ab40954..e1a9034 100644
--- a/arch/x86/kernel/tce_64.c
+++ b/arch/x86/kernel/tce_64.c
@@ -113,7 +113,7 @@ static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
bitmapsz = tbl->it_size / BITS_PER_BYTE;
bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz));
if (!bmppages) {
- printk(KERN_ERR "Calgary: cannot allocate bitmap\n");
+ pr_err("Calgary: cannot allocate bitmap\n");
ret = -ENOMEM;
goto done;
}
@@ -138,14 +138,13 @@ int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar)
int ret;
if (pci_iommu(dev->bus)) {
- printk(KERN_ERR "Calgary: dev %p has sysdata->iommu %p\n",
+ pr_err("Calgary: dev %p has sysdata->iommu %p\n",
dev, pci_iommu(dev->bus));
BUG();
}
tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
if (!tbl) {
- printk(KERN_ERR "Calgary: error allocating iommu_table\n");
ret = -ENOMEM;
goto done;
}
diff --git a/arch/x86/kernel/test_nx.c b/arch/x86/kernel/test_nx.c
index 3f92ce0..9bc948c 100644
--- a/arch/x86/kernel/test_nx.c
+++ b/arch/x86/kernel/test_nx.c
@@ -54,8 +54,8 @@ static void fudze_exception_table(void *marker, void *new)
* table.
*/
if (mod->num_exentries > 1) {
- printk(KERN_ERR "test_nx: too many exception table entries!\n");
- printk(KERN_ERR "test_nx: test results are not reliable.\n");
+ pr_err("test_nx: too many exception table entries!\n");
+ pr_err("test_nx: test results are not reliable.\n");
return;
}
extable = (struct exception_table_entry *)mod->extable;
@@ -115,11 +115,11 @@ static int test_NX(void)
test_data = 0xC3;
- printk(KERN_INFO "Testing NX protection\n");
+ pr_info("Testing NX protection\n");
/* Test 1: check if the stack is not executable */
if (test_address(&stackcode)) {
- printk(KERN_ERR "test_nx: stack was executable\n");
+ pr_err("test_nx: stack was executable\n");
ret = -ENODEV;
}
@@ -131,7 +131,7 @@ static int test_NX(void)
heap[0] = 0xC3; /* opcode for "ret" */
if (test_address(heap)) {
- printk(KERN_ERR "test_nx: heap was executable\n");
+ pr_err("test_nx: heap was executable\n");
ret = -ENODEV;
}
kfree(heap);
@@ -145,10 +145,10 @@ static int test_NX(void)
#ifdef CONFIG_DEBUG_RODATA
/* Test 3: Check if the .rodata section is executable */
if (rodata_test_data != 0xC3) {
- printk(KERN_ERR "test_nx: .rodata marker has invalid value\n");
+ pr_err("test_nx: .rodata marker has invalid value\n");
ret = -ENODEV;
} else if (test_address(&rodata_test_data)) {
- printk(KERN_ERR "test_nx: .rodata section is executable\n");
+ pr_err("test_nx: .rodata section is executable\n");
ret = -ENODEV;
}
#endif
@@ -156,7 +156,7 @@ static int test_NX(void)
#if 0
/* Test 4: Check if the .data section of a module is executable */
if (test_address(&test_data)) {
- printk(KERN_ERR "test_nx: .data section is executable\n");
+ pr_err("test_nx: .data section is executable\n");
ret = -ENODEV;
}
diff --git a/arch/x86/kernel/test_rodata.c b/arch/x86/kernel/test_rodata.c
index 5ecbfe5..e578b83 100644
--- a/arch/x86/kernel/test_rodata.c
+++ b/arch/x86/kernel/test_rodata.c
@@ -22,7 +22,7 @@ int rodata_test(void)
/* test 1: read the value */
/* If this test fails, some previous testrun has clobbered the state */
if (!rodata_test_data) {
- printk(KERN_ERR "rodata_test: test 1 fails (start data)\n");
+ pr_err("rodata_test: test 1 fails (start data)\n");
return -ENODEV;
}
@@ -50,25 +50,25 @@ int rodata_test(void)
if (!result) {
- printk(KERN_ERR "rodata_test: test data was not read only\n");
+ pr_err("rodata_test: test data was not read only\n");
return -ENODEV;
}
/* test 3: check the value hasn't changed */
/* If this test fails, we managed to overwrite the data */
if (!rodata_test_data) {
- printk(KERN_ERR "rodata_test: Test 3 fails (end data)\n");
+ pr_err("rodata_test: Test 3 fails (end data)\n");
return -ENODEV;
}
/* test 4: check if the rodata section is 4Kb aligned */
start = (unsigned long)__start_rodata;
end = (unsigned long)__end_rodata;
if (start & (PAGE_SIZE - 1)) {
- printk(KERN_ERR "rodata_test: .rodata is not 4k aligned\n");
+ pr_err("rodata_test: .rodata is not 4k aligned\n");
return -ENODEV;
}
if (end & (PAGE_SIZE - 1)) {
- printk(KERN_ERR "rodata_test: .rodata end is not 4k aligned\n");
+ pr_err("rodata_test: .rodata end is not 4k aligned\n");
return -ENODEV;
}
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 78083bf..39419df 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -156,10 +156,10 @@ void check_tsc_sync_source(int cpu)
cpu_relax();
if (nr_warps) {
- pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n",
+ pr_warn("TSC synchronization [CPU#%d -> CPU#%d]:\n",
smp_processor_id(), cpu);
- pr_warning("Measured %Ld cycles TSC warp between CPUs, "
- "turning off TSC clock.\n", max_warp);
+ pr_warn("Measured %lld cycles TSC warp between CPUs, turning off TSC clock.\n",
+ max_warp);
mark_tsc_unstable("check_tsc_sync_source failed");
} else {
pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index b034b1b..f4b8927 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -97,7 +97,7 @@ static void __init set_vsmp_pv_ops(void)
address = early_ioremap(cfg, 8);
cap = readl(address);
ctl = readl(address + 4);
- printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n",
+ pr_info("vSMP CTL: capabilities:0x%08x control:0x%08x\n",
cap, ctl);
/* If possible, let the vSMP foundation route the interrupt optimally */
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index dd30b7e..d398568 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -22,7 +22,7 @@
#include <asm/pci_x86.h>
#include <asm/acpi.h>
-#define PREFIX "PCI: "
+#define pr_fmt(fmt) "PCI: " fmt
/* Indicate if the mmcfg resources have been placed into the resource table. */
static bool pci_mmcfg_running_state;
@@ -104,10 +104,8 @@ static struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start,
list_add_sorted(new);
mutex_unlock(&pci_mmcfg_lock);
- pr_info(PREFIX
- "MMCONFIG for domain %04x [bus %02x-%02x] at %pR "
- "(base %#lx)\n",
- segment, start, end, &new->res, (unsigned long)addr);
+ pr_info("MMCONFIG for domain %04x [bus %02x-%02x] at %pR (base %#lx)\n",
+ segment, start, end, &new->res, (unsigned long)addr);
}
return new;
@@ -364,7 +362,7 @@ static int __init pci_mmcfg_check_hostbridge(void)
name = pci_mmcfg_probes[i].probe();
if (name)
- pr_info(PREFIX "%s with MMCONFIG support\n", name);
+ pr_info("%s with MMCONFIG support\n", name);
}
/* some end_bus_number is crazy, fix it */
@@ -465,8 +463,8 @@ static int __ref is_mmconf_reserved(check_reserved_t is_reserved,
dev_info(dev, "MMCONFIG at %pR reserved in %s\n",
&cfg->res, method);
else
- pr_info(PREFIX "MMCONFIG at %pR reserved in %s\n",
- &cfg->res, method);
+ pr_info("MMCONFIG at %pR reserved in %s\n",
+ &cfg->res, method);
if (old_size != size) {
/* update end_bus */
@@ -484,9 +482,7 @@ static int __ref is_mmconf_reserved(check_reserved_t is_reserved,
"at %pR (base %#lx) (size reduced!)\n",
&cfg->res, (unsigned long) cfg->address);
else
- pr_info(PREFIX
- "MMCONFIG for %04x [bus%02x-%02x] "
- "at %pR (base %#lx) (size reduced!)\n",
+ pr_info("MMCONFIG for %04x [bus%02x-%02x] at %pR (base %#lx) (size reduced!)\n",
cfg->segment, cfg->start_bus, cfg->end_bus,
&cfg->res, (unsigned long) cfg->address);
}
@@ -507,7 +503,7 @@ static int __ref pci_mmcfg_check_reserved(struct device *dev,
"ACPI motherboard resources\n",
&cfg->res);
else
- pr_info(FW_INFO PREFIX
+ pr_info(FW_INFO
"MMCONFIG at %pR not reserved in "
"ACPI motherboard resources\n",
&cfg->res);
@@ -536,7 +532,7 @@ static void __init pci_mmcfg_reject_broken(int early)
list_for_each_entry(cfg, &pci_mmcfg_list, list) {
if (pci_mmcfg_check_reserved(NULL, cfg, early) == 0) {
- pr_info(PREFIX "not using MMCONFIG\n");
+ pr_info("not using MMCONFIG\n");
free_all_mmcfg();
return;
}
@@ -560,9 +556,9 @@ static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
return 0;
}
- pr_err(PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
- "is above 4GB, ignored\n", cfg->pci_segment,
- cfg->start_bus_number, cfg->end_bus_number, cfg->address);
+ pr_err("MCFG region for %04x [bus %02x-%02x] at %#llx is above 4GB, ignored\n",
+ cfg->pci_segment, cfg->start_bus_number, cfg->end_bus_number,
+ cfg->address);
return -EINVAL;
}
@@ -587,7 +583,7 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
i -= sizeof(struct acpi_mcfg_allocation);
}
if (entries == 0) {
- pr_err(PREFIX "MMCONFIG has no entries\n");
+ pr_err("MMCONFIG has no entries\n");
return -ENODEV;
}
@@ -601,7 +597,7 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
cfg->end_bus_number, cfg->address) == NULL) {
- pr_warn(PREFIX "no memory for MCFG entries\n");
+ pr_warn("no memory for MCFG entries\n");
free_all_mmcfg();
return -ENOMEM;
}
diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
index bea5249..5fd3871 100644
--- a/arch/x86/pci/mmconfig_64.c
+++ b/arch/x86/pci/mmconfig_64.c
@@ -13,7 +13,7 @@
#include <asm/e820.h>
#include <asm/pci_x86.h>
-#define PREFIX "PCI: "
+#define pr_fmt(fmt) "PCI: " fmt
static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
{
@@ -137,7 +137,7 @@ int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg)
{
cfg->virt = mcfg_ioremap(cfg);
if (!cfg->virt) {
- pr_err(PREFIX "can't map MMCONFIG at %pR\n", &cfg->res);
+ pr_err("can't map MMCONFIG at %pR\n", &cfg->res);
return -ENOMEM;
}
--
1.8.3.1
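
For context only (not part of the patch above): a minimal sketch of the pr_fmt()/pr_<level>() pattern this conversion targets. The module name "demo", the "demo: " prefix and the demo_init()/demo_exit() functions are illustrative assumptions, not code from the patch.

/*
 * Illustrative sketch only. pr_fmt() must be defined before the printk
 * helpers are pulled in so every pr_<level>() call in this file picks
 * up the "demo: " prefix automatically.
 */
#define pr_fmt(fmt) "demo: " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
	pr_info("loaded\n");		/* equivalent to printk(KERN_INFO "demo: loaded\n") */
	pr_warn("example warning, code %d\n", 42);
	/* pr_debug() is compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG is set */
	pr_debug("debug detail\n");
	return 0;
}

static void __exit demo_exit(void)
{
	pr_info("unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Loading such a module would log "demo: loaded" at info level; the per-file prefix is why the converted files above drop the old PREFIX macros in favour of pr_fmt().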