[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CALMp9eTBPTnsRDipdGDgmugWgfFEjQ2wd_9-JY0ZeM9YG2fBjg@mail.gmail.com>
Date: Wed, 25 Sep 2019 09:47:05 -0700
From: Jim Mattson <jmattson@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: LKML <linux-kernel@...r.kernel.org>,
kvm list <kvm@...r.kernel.org>,
Sean Christopherson <sean.j.christopherson@...el.com>,
Krish Sadhukhan <krish.sadhukhan@...cle.com>
Subject: Re: [PATCH] KVM: nVMX: cleanup and fix host 64-bit mode checks
On Wed, Sep 25, 2019 at 9:34 AM Paolo Bonzini <pbonzini@...hat.com> wrote:
>
> KVM was incorrectly checking vmcs12->host_ia32_efer even if the "load
> IA32_EFER" exit control was reset. Also, some checks were not using
> the new CC macro for tracing.
>
> Cleanup everything so that the vCPU's 64-bit mode is determined
> directly from EFER_LMA and the VMCS checks are based on that, which
> matches section 26.2.4 of the SDM.
>
> Cc: Sean Christopherson <sean.j.christopherson@...el.com>
> Cc: Jim Mattson <jmattson@...gle.com>
> Cc: Krish Sadhukhan <krish.sadhukhan@...cle.com>
> Fixes: 5845038c111db27902bc220a4f70070fe945871c
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
> arch/x86/kvm/vmx/nested.c | 53 ++++++++++++++++++++---------------------------
> 1 file changed, 22 insertions(+), 31 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 70d59d9304f2..e108847f6cf8 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -2664,8 +2664,26 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
> CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
> return -EINVAL;
>
> - ia32e = (vmcs12->vm_exit_controls &
> - VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
> +#ifdef CONFIG_X86_64
> + ia32e = !!(vcpu->arch.efer & EFER_LMA);
> +#else
> + if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE))
> + return -EINVAL;
This check is redundant: when ia32e is false, the else block below already rejects VM_ENTRY_IA32E_MODE, so the check is performed on both the 32-bit and 64-bit paths.
> +
> + ia32e = false;
> +#endif
> +
> + if (ia32e) {
> + if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) ||
> + CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
> + return -EINVAL;
> + } else {
> + if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) ||
> + CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
> + CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
> + CC(((vmcs12->host_rip) >> 32) & 0xffffffff))
The mask shouldn't be necessary: host_rip is a 64-bit field, so shifting right by 32 already leaves at most 32 significant bits, and the & 0xffffffff is a no-op.
> + return -EINVAL;
> + }
>
> if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
> CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
> @@ -2684,35 +2702,8 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
> CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
> CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
> CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
> - CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)))
> - return -EINVAL;
> -
> - if (!(vmcs12->host_ia32_efer & EFER_LMA) &&
> - ((vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
> - (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE))) {
> - return -EINVAL;
> - }
> -
> - if ((vmcs12->host_ia32_efer & EFER_LMA) &&
> - !(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) {
> - return -EINVAL;
> - }
> -
> - if (!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) &&
> - ((vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
> - (vmcs12->host_cr4 & X86_CR4_PCIDE) ||
> - (((vmcs12->host_rip) >> 32) & 0xffffffff))) {
> - return -EINVAL;
> - }
> -
> - if ((vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) &&
> - ((!(vmcs12->host_cr4 & X86_CR4_PAE)) ||
> - (is_noncanonical_address(vmcs12->host_rip, vcpu)))) {
> - return -EINVAL;
> - }
> -#else
> - if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE ||
> - vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
> + CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
> + CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
> return -EINVAL;
> #endif
>
> --
> 1.8.3.1
>
Reviewed-by: Jim Mattson <jmattson@...gle.com>
Powered by blists - more mailing lists