[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <e50cb6a3-04df-0a81-c68e-00901e3cba63@redhat.com>
Date: Tue, 6 Jul 2021 14:33:15 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: isaku.yamahata@...el.com, Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
"H . Peter Anvin" <hpa@...or.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>, erdemaktas@...gle.com,
Connor Kuehl <ckuehl@...hat.com>,
Sean Christopherson <seanjc@...gle.com>, x86@...nel.org,
linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc: isaku.yamahata@...il.com
Subject: Re: [RFC PATCH v2 02/69] KVM: X86: move kvm_cpu_vmxon() from vmx.c to
virtext.h
On 03/07/21 00:04, isaku.yamahata@...el.com wrote:
> From: Isaku Yamahata <isaku.yamahata@...el.com>
>
> This is preparatory clean up for TDX support.
> Move kvm_cpu_vmxon() out of vmx.c into virtext.h, renaming it to
> cpu_vmxon(), so that it can be used outside of KVM: namely by the
> SEAMLDR, which loads the TDX module during the early kernel boot phase.
> As a bonus, this also increases the symmetry with cpu_vmxoff().
>
> No functional change is intended.
>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
> ---
> arch/x86/include/asm/virtext.h | 25 +++++++++++++++++++++++++
> arch/x86/kvm/vmx/vmx.c | 22 +---------------------
> 2 files changed, 26 insertions(+), 21 deletions(-)
>
> diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
> index 8757078d4442..9234b85dac24 100644
> --- a/arch/x86/include/asm/virtext.h
> +++ b/arch/x86/include/asm/virtext.h
> @@ -30,6 +30,31 @@ static inline int cpu_has_vmx(void)
> }
>
>
> +/**
> + * cpu_vmxon() - Enable VMX on the current CPU
> + *
> + * Set CR4.VMXE and enable VMX
> + */
> +static inline int cpu_vmxon(u64 vmxon_pointer)
> +{
> + u64 msr;
> +
> + cr4_set_bits(X86_CR4_VMXE);
> +
> + asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
> + _ASM_EXTABLE(1b, %l[fault])
> + : : [vmxon_pointer] "m"(vmxon_pointer)
> + : : fault);
> + return 0;
> +
> +fault:
> + WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
> + rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
> + cr4_clear_bits(X86_CR4_VMXE);
> +
> + return -EFAULT;
> +}
> +
> /**
> * cpu_vmxoff() - Disable VMX on the current CPU
> *
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index c2a779b688e6..d73ba7a6ff8d 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -2376,26 +2376,6 @@ static __init int vmx_disabled_by_bios(void)
> !boot_cpu_has(X86_FEATURE_VMX);
> }
>
> -static int kvm_cpu_vmxon(u64 vmxon_pointer)
> -{
> - u64 msr;
> -
> - cr4_set_bits(X86_CR4_VMXE);
> -
> - asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
> - _ASM_EXTABLE(1b, %l[fault])
> - : : [vmxon_pointer] "m"(vmxon_pointer)
> - : : fault);
> - return 0;
> -
> -fault:
> - WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
> - rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
> - cr4_clear_bits(X86_CR4_VMXE);
> -
> - return -EFAULT;
> -}
> -
> static int hardware_enable(void)
> {
> int cpu = raw_smp_processor_id();
> @@ -2415,7 +2395,7 @@ static int hardware_enable(void)
>
> intel_pt_handle_vmx(1);
>
> - r = kvm_cpu_vmxon(phys_addr);
> + r = cpu_vmxon(phys_addr);
> if (r) {
> intel_pt_handle_vmx(0);
> return r;
>
Reviewed-by: Paolo Bonzini <pbonzini@...hat.com>
Powered by blists - more mailing lists