Message-ID: <849b657fb14e94d5842130bb61d702927a1fde23.camel@intel.com>
Date: Mon, 22 May 2023 12:56:40 +0000
From: "Huang, Kai" <kai.huang@...el.com>
To: "pbonzini@...hat.com" <pbonzini@...hat.com>,
"Christopherson,, Sean" <seanjc@...gle.com>
CC: "kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"Gao, Chao" <chao.gao@...el.com>,
"andrew.cooper3@...rix.com" <andrew.cooper3@...rix.com>
Subject: Re: [PATCH v3 04/18] x86/reboot: KVM: Disable SVM during reboot via
virt/KVM reboot callback
On Fri, 2023-05-12 at 16:50 -0700, Sean Christopherson wrote:
> Use the virt callback to disable SVM (and set GIF=1) during an emergency
> instead of blindly attempting to disable SVM. Like the VMX case, if a
> hypervisor, i.e. KVM, isn't loaded/active, SVM can't be in use.
>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
Acked-by: Kai Huang <kai.huang@...el.com>
> ---
> arch/x86/include/asm/virtext.h | 8 --------
> arch/x86/kernel/reboot.c | 3 ---
> arch/x86/kvm/svm/svm.c | 19 +++++++++++++++++--
> 3 files changed, 17 insertions(+), 13 deletions(-)
>
> diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
> index 5bc29fab15da..aaed66249ccf 100644
> --- a/arch/x86/include/asm/virtext.h
> +++ b/arch/x86/include/asm/virtext.h
> @@ -133,12 +133,4 @@ static inline void cpu_svm_disable(void)
> }
> }
>
> -/** Makes sure SVM is disabled, if it is supported on the CPU
> - */
> -static inline void cpu_emergency_svm_disable(void)
> -{
> - if (cpu_has_svm(NULL))
> - cpu_svm_disable();
> -}
> -
> #endif /* _ASM_X86_VIRTEX_H */
> diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
> index 0cf2261c2dec..92b380e199a3 100644
> --- a/arch/x86/kernel/reboot.c
> +++ b/arch/x86/kernel/reboot.c
> @@ -826,9 +826,6 @@ void cpu_emergency_disable_virtualization(void)
> if (callback)
> callback();
> rcu_read_unlock();
> -
> - /* KVM_AMD doesn't yet utilize the common callback. */
> - cpu_emergency_svm_disable();
> }
>
> #if defined(CONFIG_SMP)
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index eb308c9994f9..0f0d04900bf2 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -38,6 +38,7 @@
> #include <asm/spec-ctrl.h>
> #include <asm/cpu_device_id.h>
> #include <asm/traps.h>
> +#include <asm/reboot.h>
> #include <asm/fpu/api.h>
>
> #include <asm/virtext.h>
> @@ -568,6 +569,11 @@ void __svm_write_tsc_multiplier(u64 multiplier)
> preempt_enable();
> }
>
> +static void svm_emergency_disable(void)
> +{
> + cpu_svm_disable();
> +}
> +
> static void svm_hardware_disable(void)
> {
> /* Make sure we clean up behind us */
> @@ -5184,6 +5190,13 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
> .pmu_ops = &amd_pmu_ops,
> };
>
> +static void __svm_exit(void)
> +{
> + kvm_x86_vendor_exit();
> +
> + cpu_emergency_unregister_virt_callback(svm_emergency_disable);
> +}
> +
> static int __init svm_init(void)
> {
> int r;
> @@ -5197,6 +5210,8 @@ static int __init svm_init(void)
> if (r)
> return r;
>
> + cpu_emergency_register_virt_callback(svm_emergency_disable);
> +
> /*
> * Common KVM initialization _must_ come last, after this, /dev/kvm is
> * exposed to userspace!
> @@ -5209,14 +5224,14 @@ static int __init svm_init(void)
> return 0;
>
> err_kvm_init:
> - kvm_x86_vendor_exit();
> + __svm_exit();
> return r;
> }
>
> static void __exit svm_exit(void)
> {
> kvm_exit();
> - kvm_x86_vendor_exit();
> + __svm_exit();
> }
>
> module_init(svm_init)
> --
> 2.40.1.606.ga4b1b128d6-goog
>
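
For completeness, a minimal sketch of what the emergency path boils down to
on AMD once the callback is registered.  This assumes cpu_svm_disable()
keeps its current virtext.h semantics (clear VM_HSAVE_PA, force GIF=1 via
STGI, then clear EFER.SVME); the function name below is illustrative only,
not the in-tree code:

	#include <linux/types.h>
	#include <asm/msr.h>
	#include <asm/msr-index.h>

	/* Illustrative only; mirrors cpu_svm_disable()'s documented behavior. */
	static void example_emergency_svm_disable(void)
	{
		u64 efer;

		wrmsrl(MSR_VM_HSAVE_PA, 0);	/* drop the host save area */
		rdmsrl(MSR_EFER, efer);
		if (efer & EFER_SVME) {
			/* Set GIF=1 first so INIT/NMI aren't left blocked. */
			asm volatile("stgi" ::: "memory");
			wrmsrl(MSR_EFER, efer & ~EFER_SVME);
		}
	}

So with this patch the NMI shootdown path only does real work when kvm_amd
is loaded and has registered the callback, matching the VMX side.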