Message-ID: <CANRm+CxvhDzcz6jDCdpCGsfd0hHjyEWRGpTPhvL8-ggdtPvP8A@mail.gmail.com>
Date: Tue, 19 Nov 2019 15:01:01 +0800
From: Wanpeng Li <kernellwp@...il.com>
To: Zhenzhong Duan <zhenzhong.duan@...cle.com>
Cc: LKML <linux-kernel@...r.kernel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
"the arch/x86 maintainers" <x86@...nel.org>,
Paolo Bonzini <pbonzini@...hat.com>,
Radim Krcmar <rkrcmar@...hat.com>,
Sean Christopherson <sean.j.christopherson@...el.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
Juergen Gross <jgross@...e.com>,
Peter Zijlstra <peterz@...radead.org>, will@...nel.org,
linux-hyperv@...r.kernel.org, kvm <kvm@...r.kernel.org>,
mikelley@...rosoft.com, "K. Y. Srinivasan" <kys@...rosoft.com>,
Haiyang Zhang <haiyangz@...rosoft.com>,
Stephen Hemminger <sthemmin@...rosoft.com>,
Sasha Levin <sashal@...nel.org>,
"H. Peter Anvin" <hpa@...or.com>
Subject: Re: [PATCH v8 2/5] x86/kvm: Change print code to use pr_*() format
On Wed, 23 Oct 2019 at 19:23, Zhenzhong Duan <zhenzhong.duan@...cle.com> wrote:
>
> pr_*() is preferred over printk(KERN_* ...). After this change, all prints
> in arch/x86/kernel/kvm.c will have the "kvm-guest: xxx" style.
>
> No functional change.
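>
> For illustration (a minimal sketch, not part of the patch): kernel
> convention is to define pr_fmt() before any #include, since printk.h
> supplies a default definition when none is present. printk.h expands
> pr_info(fmt, ...) to printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__), so
> every pr_*() call picks up the prefix at compile time:
>
>     /* Hypothetical standalone example; pr_fmt_demo() is not in the patch. */
>     #define pr_fmt(fmt) "kvm-guest: " fmt
>
>     #include <linux/printk.h>
>
>     static void pr_fmt_demo(void)
>     {
>             /* Emits "kvm-guest: setup PV IPIs" at KERN_INFO level. */
>             pr_info("setup PV IPIs\n");
>     }
>
> Note that WARN_ONCE() does not go through pr_fmt(), which is why the
> "kvm-guest: " prefix is spelled out explicitly in the WARN_ONCE()
> strings below.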
>
> Signed-off-by: Zhenzhong Duan <zhenzhong.duan@...cle.com>
> Reviewed-by: Vitaly Kuznetsov <vkuznets@...hat.com>
> Cc: Paolo Bonzini <pbonzini@...hat.com>
> Cc: Radim Krcmar <rkrcmar@...hat.com>
> Cc: Sean Christopherson <sean.j.christopherson@...el.com>
> Cc: Vitaly Kuznetsov <vkuznets@...hat.com>
> Cc: Wanpeng Li <wanpengli@...cent.com>
> Cc: Jim Mattson <jmattson@...gle.com>
> Cc: Joerg Roedel <joro@...tes.org>
> Cc: Thomas Gleixner <tglx@...utronix.de>
> Cc: Ingo Molnar <mingo@...hat.com>
> Cc: Borislav Petkov <bp@...en8.de>
> Cc: "H. Peter Anvin" <hpa@...or.com>
Reviewed-by: Wanpeng Li <wanpengli@...cent.com>
> ---
> arch/x86/kernel/kvm.c | 30 ++++++++++++++++--------------
> 1 file changed, 16 insertions(+), 14 deletions(-)
>
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index 3bc6a266..6562886 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -7,6 +7,8 @@
> * Authors: Anthony Liguori <aliguori@...ibm.com>
> */
>
> +#define pr_fmt(fmt) "kvm-guest: " fmt
> +
> #include <linux/context_tracking.h>
> #include <linux/init.h>
> #include <linux/kernel.h>
> @@ -286,8 +288,8 @@ static void kvm_register_steal_time(void)
> return;
>
> wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
> - pr_info("kvm-stealtime: cpu %d, msr %llx\n",
> - cpu, (unsigned long long) slow_virt_to_phys(st));
> + pr_info("stealtime: cpu %d, msr %llx\n", cpu,
> + (unsigned long long) slow_virt_to_phys(st));
> }
>
> static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
> @@ -321,8 +323,7 @@ static void kvm_guest_cpu_init(void)
>
> wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
> __this_cpu_write(apf_reason.enabled, 1);
> - printk(KERN_INFO"KVM setup async PF for cpu %d\n",
> - smp_processor_id());
> + pr_info("setup async PF for cpu %d\n", smp_processor_id());
> }
>
> if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
> @@ -347,8 +348,7 @@ static void kvm_pv_disable_apf(void)
> wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
> __this_cpu_write(apf_reason.enabled, 0);
>
> - printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
> - smp_processor_id());
> + pr_info("unregister PV shared memory for cpu %d\n", smp_processor_id());
> }
>
> static void kvm_pv_guest_cpu_reboot(void *unused)
> @@ -469,7 +469,8 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
> } else {
> ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
> (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
> - WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
> + WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
> + ret);
> min = max = apic_id;
> ipi_bitmap = 0;
> }
> @@ -479,7 +480,8 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
> if (ipi_bitmap) {
> ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
> (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
> - WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
> + WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
> + ret);
> }
>
> local_irq_restore(flags);
> @@ -509,7 +511,7 @@ static void kvm_setup_pv_ipi(void)
> {
> apic->send_IPI_mask = kvm_send_ipi_mask;
> apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
> - pr_info("KVM setup pv IPIs\n");
> + pr_info("setup PV IPIs\n");
> }
>
> static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
> @@ -631,11 +633,11 @@ static void __init kvm_guest_init(void)
> !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
> kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
> smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
> - pr_info("KVM setup pv sched yield\n");
> + pr_info("setup PV sched yield\n");
> }
> if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
> kvm_cpu_online, kvm_cpu_down_prepare) < 0)
> - pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
> + pr_err("failed to install cpu hotplug callbacks\n");
> #else
> sev_map_percpu_data();
> kvm_guest_cpu_init();
> @@ -738,7 +740,7 @@ static __init int kvm_setup_pv_tlb_flush(void)
> zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
> GFP_KERNEL, cpu_to_node(cpu));
> }
> - pr_info("KVM setup pv remote TLB flush\n");
> + pr_info("setup PV remote TLB flush\n");
> }
>
> return 0;
> @@ -866,8 +868,8 @@ static void kvm_enable_host_haltpoll(void *i)
> void arch_haltpoll_enable(unsigned int cpu)
> {
> if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
> - pr_err_once("kvm: host does not support poll control\n");
> - pr_err_once("kvm: host upgrade recommended\n");
> + pr_err_once("host does not support poll control\n");
> + pr_err_once("host upgrade recommended\n");
> return;
> }
>
> --
> 1.8.3.1
>