Message-ID: <87zhmcfo0w.fsf@vitty.brq.redhat.com>
Date: Thu, 20 Jun 2019 14:18:39 +0200
From: Vitaly Kuznetsov <vkuznets@...hat.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: Aaron Lewis <aaronlewis@...gle.com>, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org
Subject: Re: [PATCH] KVM: nVMX: reorganize initial steps of vmx_set_nested_state
Paolo Bonzini <pbonzini@...hat.com> writes:
> Commit 332d079735f5 ("KVM: nVMX: KVM_SET_NESTED_STATE - Tear down old EVMCS
> state before setting new state", 2019-05-02) broke evmcs_test because the
> eVMCS setup must be performed even if there is no VMXON region defined,
> as long as the eVMCS bit is set in the assist page.
>
> While the simplest possible fix would be to add a check on
> kvm_state->flags & KVM_STATE_NESTED_EVMCS in the initial "if" that
> covers kvm_state->hdr.vmx.vmxon_pa == -1ull, that is quite ugly.
>
> Instead, this patch moves checks earlier in the function and
> conditionalizes them on kvm_state->hdr.vmx.vmxon_pa, so that
> vmx_set_nested_state always goes through vmx_leave_nested
> and nested_enable_evmcs.
>
> Fixes: 332d079735f5
checkpatch.pl will likely complain here, asking for the full description, e.g.:
Fixes: 332d079735f5 ("KVM: nVMX: KVM_SET_NESTED_STATE - Tear down old EVMCS state before setting new state")
There's also something wrong with the patch itself: it fails to apply, apparently
because of (not only?) whitespace issues, or maybe I'm just applying it to the
wrong tree...
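
The reorganization itself looks right to me. Just to check my reading of it,
here is a condensed sketch of how the early part of vmx_set_nested_state()
ends up after this patch (simplified from the hunks below, not the literal
code):

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
		return -EINVAL;

	if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
		/* No VMXON region: no SMM flags, no vmcs12, and at most
		 * the eVMCS flag may be set. */
		if (kvm_state->hdr.vmx.smm.flags)
			return -EINVAL;
		if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
			return -EINVAL;
		if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
			return -EINVAL;
	} else {
		/* A VMXON region is supplied: nested VMX must be allowed
		 * and the address must be page aligned. */
		if (!nested_vmx_allowed(vcpu))
			return -EINVAL;
		if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
			return -EINVAL;
	}

	/* ... remaining SMM/guest-mode consistency checks ... */

	vmx_leave_nested(vcpu);

	if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
		if (!nested_vmx_allowed(vcpu))
			return -EINVAL;
		nested_enable_evmcs(vcpu, NULL);
	}

	if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
		return 0;	/* nothing else to restore */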
> Cc: Aaron Lewis <aaronlewis@...gle.com>
> Cc: Vitaly Kuznetsov <vkuznets@...hat.com>
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
Enlightened VMCS migration is just a 'theoretical' feature atm: we don't
know if it actually works, but it's good that we have a selftest for it,
so we know when it definitely doesn't :-)
Reviewed-by: Vitaly Kuznetsov <vkuznets@...hat.com>
> ---
> arch/x86/kvm/vmx/nested.c | 26 ++++++++++--------
> .../kvm/x86_64/vmx_set_nested_state_test.c | 32 ++++++++++++++--------
> 2 files changed, 35 insertions(+), 23 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index fb6d1f7b43f3..5f9c1a200201 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -5343,9 +5343,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
> if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
> return -EINVAL;
>
> - if (!nested_vmx_allowed(vcpu))
> - return kvm_state->hdr.vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
> -
> if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
> if (kvm_state->hdr.vmx.smm.flags)
> return -EINVAL;
> @@ -5353,12 +5350,15 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
> if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
> return -EINVAL;
>
> - vmx_leave_nested(vcpu);
> - return 0;
> - }
> + if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
> + return -EINVAL;
> + } else {
> + if (!nested_vmx_allowed(vcpu))
> + return -EINVAL;
>
> - if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
> - return -EINVAL;
> + if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
> + return -EINVAL;
> + }
>
> if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
> (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
> @@ -5381,11 +5381,15 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
> return -EINVAL;
>
> vmx_leave_nested(vcpu);
> - if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
> - return 0;
> + if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
> + if (!nested_vmx_allowed(vcpu))
> + return -EINVAL;
>
> - if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
> nested_enable_evmcs(vcpu, NULL);
> + }
> +
> + if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
> + return 0;
>
> vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
> ret = enter_vmx_operation(vcpu);
> diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
> index 0648fe6df5a8..e64ca20b315a 100644
> --- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
> @@ -123,36 +123,44 @@ void test_vmx_nested_state(struct kvm_vm *vm)
> /*
> * We cannot virtualize anything if the guest does not have VMX
> * enabled. We expect KVM_SET_NESTED_STATE to return 0 if vmxon_pa
> - * is set to -1ull.
> + * is set to -1ull, but the flags must be zero.
> */
> set_default_vmx_state(state, state_sz);
> state->hdr.vmx.vmxon_pa = -1ull;
> + test_nested_state_expect_einval(vm, state);
> +
> + state->hdr.vmx.vmcs12_pa = -1ull;
> + state->flags = KVM_STATE_NESTED_EVMCS;
> + test_nested_state_expect_einval(vm, state);
> +
> + state->flags = 0;
> test_nested_state(vm, state);
>
> /* Enable VMX in the guest CPUID. */
> vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
>
> - /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
> + /*
> + * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
> + * setting the nested state but flags other than eVMCS must be clear.
> + */
> set_default_vmx_state(state, state_sz);
> state->hdr.vmx.vmxon_pa = -1ull;
> + state->hdr.vmx.vmcs12_pa = -1ull;
> + test_nested_state_expect_einval(vm, state);
> +
> + state->flags = KVM_STATE_NESTED_EVMCS;
> + test_nested_state(vm, state);
> +
> + /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
> state->hdr.vmx.smm.flags = 1;
> test_nested_state_expect_einval(vm, state);
>
> /* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
> set_default_vmx_state(state, state_sz);
> state->hdr.vmx.vmxon_pa = -1ull;
> - state->hdr.vmx.vmcs12_pa = 0;
> + state->flags = 0;
> test_nested_state_expect_einval(vm, state);
>
> - /*
> - * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
> - * setting the nested state.
> - */
> - set_default_vmx_state(state, state_sz);
> - state->hdr.vmx.vmxon_pa = -1ull;
> - state->hdr.vmx.vmcs12_pa = -1ull;
> - test_nested_state(vm, state);
> -
> /* It is invalid to have vmxon_pa set to a non-page aligned address. */
> set_default_vmx_state(state, state_sz);
> state->hdr.vmx.vmxon_pa = 1;
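
FWIW, the case this fix re-enables is a VMM restoring eVMCS-only state with
no VMXON region. A rough, untested userspace sketch of the call the selftest
now expects to succeed (the helper name is made up; the field values match
the test above):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* vcpu_fd: a KVM vCPU fd whose guest CPUID advertises VMX. */
	static int restore_evmcs_only_state(int vcpu_fd)
	{
		struct kvm_nested_state state;

		memset(&state, 0, sizeof(state));
		state.format = KVM_STATE_NESTED_FORMAT_VMX;
		state.size = sizeof(state);
		state.flags = KVM_STATE_NESTED_EVMCS;
		state.hdr.vmx.vmxon_pa = -1ull;
		state.hdr.vmx.vmcs12_pa = -1ull;

		/* With this patch, KVM enables eVMCS and returns 0 even
		 * though there is no VMXON region. */
		return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, &state);
	}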
--
Vitaly