Message-ID: <b279981a9e49539ae3c18ace9c49042771e15eaa.camel@redhat.com>
Date: Mon, 10 May 2021 11:25:57 +0300
From: Maxim Levitsky <mlevitsk@...hat.com>
To: Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>
Cc: Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, Xiaoyao Li <xiaoyao.li@...el.com>,
Reiji Watanabe <reijiw@...gle.com>
Subject: Re: [PATCH 10/15] KVM: VMX: Use common x86's uret MSR list as the
one true list
On Tue, 2021-05-04 at 10:17 -0700, Sean Christopherson wrote:
> Drop VMX's global list of user return MSRs now that VMX doesn't re-sort said
> list to isolate "active" MSRs, i.e. now that VMX's list and x86's list have
> the same MSRs in the same order.
>
> In addition to eliminating the redundant list, this will also allow moving
> more of the list management into common x86.
>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
>  arch/x86/include/asm/kvm_host.h |  1 +
>  arch/x86/kvm/vmx/vmx.c          | 97 ++++++++++++++-------------
>  arch/x86/kvm/x86.c              | 12 ++++
>  3 files changed, 53 insertions(+), 57 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index a02c9bf3f7f1..c9452472ed55 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1778,6 +1778,7 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
> unsigned long icr, int op_64_bit);
>
> void kvm_define_user_return_msr(unsigned index, u32 msr);
> +int kvm_find_user_return_msr(u32 msr);
> int kvm_probe_user_return_msr(u32 msr);
> int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
>
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 6caabcd5037e..4b432d2bbd06 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -454,26 +454,7 @@ static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
>
> static unsigned long host_idt_base;
>
> -/*
> - * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
> - * will emulate SYSCALL in legacy mode if the vendor string in guest
> - * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
> - * support this emulation, MSR_STAR is included in the list for i386,
> - * but is never loaded into hardware. MSR_CSTAR is also never loaded
> - * into hardware and is here purely for emulation purposes.
> - */
> -static u32 vmx_uret_msrs_list[] = {
> -#ifdef CONFIG_X86_64
> - MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
> -#endif
> - MSR_EFER, MSR_TSC_AUX, MSR_STAR,
> - MSR_IA32_TSX_CTRL,
> -};
> -
> -/*
> - * Number of user return MSRs that are actually supported in hardware.
> - * vmx_uret_msrs_list is modified when KVM is loaded to drop unsupported MSRs.
> - */
> +/* Number of user return MSRs that are actually supported in hardware. */
> static int vmx_nr_uret_msrs;
>
> #if IS_ENABLED(CONFIG_HYPERV)
> @@ -703,22 +684,11 @@ static bool is_valid_passthrough_msr(u32 msr)
> return r;
> }
>
> -static inline int __vmx_find_uret_msr(u32 msr)
> -{
> - int i;
> -
> - for (i = 0; i < vmx_nr_uret_msrs; ++i) {
> - if (vmx_uret_msrs_list[i] == msr)
> - return i;
> - }
> - return -1;
> -}
> -
> struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
> {
> int i;
>
> - i = __vmx_find_uret_msr(msr);
> + i = kvm_find_user_return_msr(msr);
> if (i >= 0)
> return &vmx->guest_uret_msrs[i];
> return NULL;
> @@ -1086,7 +1056,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx)
> return false;
> }
>
> - i = __vmx_find_uret_msr(MSR_EFER);
> + i = kvm_find_user_return_msr(MSR_EFER);
> if (i < 0)
> return false;
>
> @@ -6922,6 +6892,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
>
> static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
> {
> + struct vmx_uret_msr *tsx_ctrl;
> struct vcpu_vmx *vmx;
> int i, cpu, err;
>
> @@ -6946,29 +6917,25 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
>
> for (i = 0; i < vmx_nr_uret_msrs; ++i) {
> vmx->guest_uret_msrs[i].data = 0;
> -
> - switch (vmx_uret_msrs_list[i]) {
> - case MSR_IA32_TSX_CTRL:
> - /*
> - * TSX_CTRL_CPUID_CLEAR is handled in the CPUID
> - * interception. Keep the host value unchanged to avoid
> - * changing CPUID bits under the host kernel's feet.
> - *
> - * hle=0, rtm=0, tsx_ctrl=1 can be found with some
> - * combinations of new kernel and old userspace. If
> - * those guests run on a tsx=off host, do allow guests
> - * to use TSX_CTRL, but do not change the value on the
> - * host so that TSX remains always disabled.
> - */
> - if (boot_cpu_has(X86_FEATURE_RTM))
> - vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
> - else
> - vmx->guest_uret_msrs[i].mask = 0;
> - break;
> - default:
> - vmx->guest_uret_msrs[i].mask = -1ull;
> - break;
> - }
> + vmx->guest_uret_msrs[i].mask = -1ull;
> + }
> + tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
> + if (tsx_ctrl) {
> + /*
> + * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception.
> + * Keep the host value unchanged to avoid changing CPUID bits
> + * under the host kernel's feet.
> + *
> + * hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations
> + * of new kernel and old userspace. If those guests run on a
> + * tsx=off host, do allow guests to use TSX_CTRL, but do not
> + * change the value on the host so that TSX remains always
> + * disabled.
> + */
> + if (boot_cpu_has(X86_FEATURE_RTM))
> + tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
> + else
> + tsx_ctrl->mask = 0;
> }
>
> err = alloc_loaded_vmcs(&vmx->vmcs01);
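
For reference, the mask set above is consumed by
kvm_set_user_return_msr(); paraphrased from x86.c (not part of this
patch), the value written to hardware is computed roughly as:

	/*
	 * Guest bits pass through only where the mask is set; with
	 * mask == 0 the host value is written back verbatim, so TSX
	 * stays disabled on tsx=off hosts while the guest still sees
	 * its own TSX_CTRL writes via guest_uret_msrs[].data.
	 */
	value = (value & mask) | (msrs->values[slot].host & ~mask);
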
> @@ -7829,6 +7796,22 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
>
> static __init void vmx_setup_user_return_msrs(void)
> {
> +
> + /*
> + * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
> + * will emulate SYSCALL in legacy mode if the vendor string in guest
> + * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
> + * support this emulation, MSR_STAR is included in the list for i386,
> + * but is never loaded into hardware. MSR_CSTAR is also never loaded
> + * into hardware and is here purely for emulation purposes.
> + */
> + const u32 vmx_uret_msrs_list[] = {
> + #ifdef CONFIG_X86_64
> + MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
> + #endif
> + MSR_EFER, MSR_TSC_AUX, MSR_STAR,
> + MSR_IA32_TSX_CTRL,
> + };
> u32 msr;
> int i;
>
> @@ -7841,7 +7824,7 @@ static __init void vmx_setup_user_return_msrs(void)
> continue;
>
> kvm_define_user_return_msr(vmx_nr_uret_msrs, msr);
> - vmx_uret_msrs_list[vmx_nr_uret_msrs++] = msr;
> + vmx_nr_uret_msrs++;
> }
> }
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index b4516d303413..90ef340565a4 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -364,6 +364,18 @@ void kvm_define_user_return_msr(unsigned slot, u32 msr)
> }
> EXPORT_SYMBOL_GPL(kvm_define_user_return_msr);
>
> +int kvm_find_user_return_msr(u32 msr)
> +{
> + int i;
> +
> + for (i = 0; i < user_return_msrs_global.nr; ++i) {
> + if (user_return_msrs_global.msrs[i] == msr)
> + return i;
> + }
> + return -1;
> +}
> +EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
> +
> static void kvm_user_return_msr_cpu_online(void)
> {
> unsigned int cpu = smp_processor_id();
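
One property worth spelling out: because the common list and each
vCPU's guest_uret_msrs[] are now kept in the same order, the index
returned here is valid for both arrays. A sketch (guest_val is
illustrative):

	i = kvm_find_user_return_msr(MSR_LSTAR);
	if (i >= 0) {
		vmx->guest_uret_msrs[i].data = guest_val;
		kvm_set_user_return_msr(i, guest_val, -1ull);
	}
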
Reviewed-by: Maxim Levitsky <mlevitsk@...hat.com>

Best regards,
	Maxim Levitsky <mlevitsk@...hat.com>