Message-ID: <4b0f9fbea71bd779164e8a703f94fb68ae0d43df.camel@redhat.com>
Date:   Mon, 24 May 2021 15:36:15 +0300
From:   Maxim Levitsky <mlevitsk@...hat.com>
To:     Vitaly Kuznetsov <vkuznets@...hat.com>, kvm@...r.kernel.org,
        Paolo Bonzini <pbonzini@...hat.com>
Cc:     Sean Christopherson <seanjc@...gle.com>,
        Wanpeng Li <wanpengli@...cent.com>,
        Jim Mattson <jmattson@...gle.com>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 7/7] KVM: selftests: evmcs_test: Test that
 KVM_STATE_NESTED_EVMCS is never lost

On Mon, 2021-05-17 at 15:50 +0200, Vitaly Kuznetsov wrote:
> Do KVM_GET_NESTED_STATE/KVM_SET_NESTED_STATE for a freshly restored VM
> (before the first KVM_RUN) to check that KVM_STATE_NESTED_EVMCS is not
> lost.
> 
> Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
> ---
>  .../testing/selftests/kvm/x86_64/evmcs_test.c | 64 +++++++++++--------
>  1 file changed, 38 insertions(+), 26 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
> index 63096cea26c6..fcef347a681a 100644
> --- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
> @@ -121,14 +121,38 @@ void inject_nmi(struct kvm_vm *vm)
>  	vcpu_events_set(vm, VCPU_ID, &events);
>  }
>  
> +static void save_restore_vm(struct kvm_vm *vm)
> +{
> +	struct kvm_regs regs1, regs2;
> +	struct kvm_x86_state *state;
> +
> +	state = vcpu_save_state(vm, VCPU_ID);
> +	memset(&regs1, 0, sizeof(regs1));
> +	vcpu_regs_get(vm, VCPU_ID, &regs1);
> +
> +	kvm_vm_release(vm);
> +
> +	/* Restore state in a new VM.  */
> +	kvm_vm_restart(vm, O_RDWR);
> +	vm_vcpu_add(vm, VCPU_ID);
> +	vcpu_set_hv_cpuid(vm, VCPU_ID);
> +	vcpu_enable_evmcs(vm, VCPU_ID);
> +	vcpu_load_state(vm, VCPU_ID, state);
> +	free(state);
> +
> +	memset(&regs2, 0, sizeof(regs2));
> +	vcpu_regs_get(vm, VCPU_ID, &regs2);
> +	TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
> +		    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
> +		    (ulong) regs2.rdi, (ulong) regs2.rsi);
> +}
> +
>  int main(int argc, char *argv[])
>  {
>  	vm_vaddr_t vmx_pages_gva = 0;
>  
> -	struct kvm_regs regs1, regs2;
>  	struct kvm_vm *vm;
>  	struct kvm_run *run;
> -	struct kvm_x86_state *state;
>  	struct ucall uc;
>  	int stage;
>  
> @@ -145,10 +169,6 @@ int main(int argc, char *argv[])
>  	vcpu_set_hv_cpuid(vm, VCPU_ID);
>  	vcpu_enable_evmcs(vm, VCPU_ID);
>  
> -	run = vcpu_state(vm, VCPU_ID);
> -
> -	vcpu_regs_get(vm, VCPU_ID, &regs1);
> -
>  	vcpu_alloc_vmx(vm, &vmx_pages_gva);
>  	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
>  
> @@ -160,6 +180,7 @@ int main(int argc, char *argv[])
>  	pr_info("Running L1 which uses EVMCS to run L2\n");
>  
>  	for (stage = 1;; stage++) {
> +		run = vcpu_state(vm, VCPU_ID);
>  		_vcpu_run(vm, VCPU_ID);
>  		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
>  			    "Stage %d: unexpected exit reason: %u (%s),\n",
> @@ -184,32 +205,23 @@ int main(int argc, char *argv[])
>  			    uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
>  			    stage, (ulong)uc.args[1]);
>  
> -		state = vcpu_save_state(vm, VCPU_ID);
> -		memset(&regs1, 0, sizeof(regs1));
> -		vcpu_regs_get(vm, VCPU_ID, &regs1);
> -
> -		kvm_vm_release(vm);
> -
> -		/* Restore state in a new VM.  */
> -		kvm_vm_restart(vm, O_RDWR);
> -		vm_vcpu_add(vm, VCPU_ID);
> -		vcpu_set_hv_cpuid(vm, VCPU_ID);
> -		vcpu_enable_evmcs(vm, VCPU_ID);
> -		vcpu_load_state(vm, VCPU_ID, state);
> -		run = vcpu_state(vm, VCPU_ID);
> -		free(state);
> -
> -		memset(&regs2, 0, sizeof(regs2));
> -		vcpu_regs_get(vm, VCPU_ID, &regs2);
> -		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
> -			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
> -			    (ulong) regs2.rdi, (ulong) regs2.rsi);
> +		save_restore_vm(vm);
>  
>  		/* Force immediate L2->L1 exit before resuming */
>  		if (stage == 8) {
>  			pr_info("Injecting NMI into L1 before L2 had a chance to run after restore\n");
>  			inject_nmi(vm);
>  		}
> +
> +		/*
> +		 * Do KVM_GET_NESTED_STATE/KVM_SET_NESTED_STATE for a freshly
> +		 * restored VM (before the first KVM_RUN) to check that
> +		 * KVM_STATE_NESTED_EVMCS is not lost.
> +		 */
> +		if (stage == 9) {
> +			pr_info("Trying extra KVM_GET_NESTED_STATE/KVM_SET_NESTED_STATE cycle\n");
> +			save_restore_vm(vm);
> +		}
>  	}
>  
>  done:


This is a very good test. I do think that in the future we should move save_restore_vm
to common code, so that SVM nested migration (and plain VMX nested migration) could be
tested in a similar way.
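
Something along the lines of the rough sketch below is what I have in mind (the helper
and callback names are made up purely for illustration, not a concrete API proposal; it
relies on the usual kvm_util.h/processor.h selftest helpers already used in this patch).
The only test-specific step in save_restore_vm is the Hyper-V CPUID + eVMCS re-enabling,
so that part could become a per-test callback:

static void save_restore_vm_common(struct kvm_vm *vm, uint32_t vcpu_id,
				   void (*arch_setup)(struct kvm_vm *vm,
						      uint32_t vcpu_id))
{
	struct kvm_regs regs1, regs2;
	struct kvm_x86_state *state;

	/* Save full vCPU state plus the GPRs for the post-restore sanity check. */
	state = vcpu_save_state(vm, vcpu_id);
	memset(&regs1, 0, sizeof(regs1));
	vcpu_regs_get(vm, vcpu_id, &regs1);

	kvm_vm_release(vm);

	/* Restore state in a new VM. */
	kvm_vm_restart(vm, O_RDWR);
	vm_vcpu_add(vm, vcpu_id);
	if (arch_setup)
		arch_setup(vm, vcpu_id);  /* e.g. Hyper-V CPUID + eVMCS for evmcs_test */
	vcpu_load_state(vm, vcpu_id, state);
	free(state);

	/* GPRs must survive the save/restore cycle unchanged. */
	memset(&regs2, 0, sizeof(regs2));
	vcpu_regs_get(vm, vcpu_id, &regs2);
	TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
		    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
		    (ulong)regs2.rdi, (ulong)regs2.rsi);
}

evmcs_test would then pass a callback that does vcpu_set_hv_cpuid() + vcpu_enable_evmcs(),
while an SVM (or plain VMX) migration test could pass NULL or its own setup.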

Reviewed-by: Maxim Levitsky <mlevitsk@...hat.com>

Best regards,
	Maxim Levitsky


