Date:   Wed, 07 Jul 2021 13:35:52 +0300
From:   Maxim Levitsky <mlevitsk@...hat.com>
To:     Vitaly Kuznetsov <vkuznets@...hat.com>, kvm@...r.kernel.org,
        Paolo Bonzini <pbonzini@...hat.com>
Cc:     Sean Christopherson <seanjc@...gle.com>,
        Wanpeng Li <wanpengli@...cent.com>,
        Jim Mattson <jmattson@...gle.com>,
        Cathy Avery <cavery@...hat.com>,
        Emanuele Giuseppe Esposito <eesposit@...hat.com>,
        Tom Lendacky <thomas.lendacky@....com>,
        Michael Roth <mdroth@...ux.vnet.ibm.com>,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH 6/6] KVM: selftests: smm_test: Test SMM enter from L2

On Mon, 2021-06-28 at 12:44 +0200, Vitaly Kuznetsov wrote:
> Two additional tests are added:
> - SMM triggered from L2 does not corrupt L1 host state.
> - Save/restore during SMM triggered from L2 does not corrupt guest/host
>   state.
> 
> Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
> ---
>  tools/testing/selftests/kvm/x86_64/smm_test.c | 70 +++++++++++++++++--
>  1 file changed, 64 insertions(+), 6 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
> index c1f831803ad2..d0fe2fdce58c 100644
> --- a/tools/testing/selftests/kvm/x86_64/smm_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
> @@ -53,15 +53,28 @@ static inline void sync_with_host(uint64_t phase)
>  		     : "+a" (phase));
>  }
>  
> -void self_smi(void)
> +static void self_smi(void)
>  {
>  	x2apic_write_reg(APIC_ICR,
>  			 APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
>  }
>  
> -void guest_code(void *arg)
> +static void l2_guest_code(void)
>  {
> +	sync_with_host(8);
> +
> +	sync_with_host(10);
> +
> +	vmcall();
> +}
> +
> +static void guest_code(void *arg)
> +{
> +	#define L2_GUEST_STACK_SIZE 64
> +	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
>  	uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
> +	struct svm_test_data *svm = arg;
> +	struct vmx_pages *vmx_pages = arg;
>  
>  	sync_with_host(1);
>  
> @@ -74,21 +87,50 @@ void guest_code(void *arg)
>  	sync_with_host(4);
>  
>  	if (arg) {
> -		if (cpu_has_svm())
> -			generic_svm_setup(arg, NULL, NULL);
> -		else
> -			GUEST_ASSERT(prepare_for_vmx_operation(arg));
> +		if (cpu_has_svm()) {
> +			generic_svm_setup(svm, l2_guest_code,
> +					  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
> +		} else {
> +			GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
> +			GUEST_ASSERT(load_vmcs(vmx_pages));
> +			prepare_vmcs(vmx_pages, l2_guest_code,
> +				     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
> +		}
>  
>  		sync_with_host(5);
>  
>  		self_smi();
>  
>  		sync_with_host(7);
> +
> +		if (cpu_has_svm()) {
> +			run_guest(svm->vmcb, svm->vmcb_gpa);
> +			svm->vmcb->save.rip += 3;
> +			run_guest(svm->vmcb, svm->vmcb_gpa);
> +		} else {
> +			vmlaunch();
> +			vmresume();
> +		}
> +
> +		/* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
> +		sync_with_host(12);
>  	}
>  
>  	sync_with_host(DONE);
>  }
>  
> +void inject_smi(struct kvm_vm *vm)
> +{
> +	struct kvm_vcpu_events events;
> +
> +	vcpu_events_get(vm, VCPU_ID, &events);
> +
> +	events.smi.pending = 1;
> +	events.flags |= KVM_VCPUEVENT_VALID_SMM;
> +
> +	vcpu_events_set(vm, VCPU_ID, &events);
> +}
> +
>  int main(int argc, char *argv[])
>  {
>  	vm_vaddr_t nested_gva = 0;
> @@ -147,6 +189,22 @@ int main(int argc, char *argv[])
>  			    "Unexpected stage: #%x, got %x",
>  			    stage, stage_reported);
>  
> +		/*
> +		 * Enter SMM during L2 execution and check that we correctly
> +		 * return from it. Do not perform save/restore while in SMM yet.
> +		 */
> +		if (stage == 8) {
> +			inject_smi(vm);
> +			continue;
> +		}
> +
> +		/*
> +		 * Perform save/restore while the guest is in SMM triggered
> +		 * during L2 execution.
> +		 */
> +		if (stage == 10)
> +			inject_smi(vm);
> +
>  		state = vcpu_save_state(vm, VCPU_ID);
>  		kvm_vm_release(vm);
>  		kvm_vm_restart(vm, O_RDWR);

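For anyone reading along outside the selftest tree, a hedged sketch of
what the guest-side self_smi() boils down to in x2APIC mode: a single
write to the x2APIC ICR MSR with SMI delivery mode and the "self"
destination shorthand. The constants are the standard apicdef.h values;
the local wrmsr helper and the _sketch name are illustrative only, not
selftest APIs.

#include <stdint.h>

#define X2APIC_ICR_MSR   0x830    /* 0x800 + (APIC_ICR 0x300 >> 4) */
#define APIC_DEST_SELF   0x40000  /* destination shorthand: self */
#define APIC_INT_ASSERT  0x04000  /* level: assert */
#define APIC_DM_SMI      0x00200  /* delivery mode: SMI */

static inline void wrmsr(uint32_t msr, uint64_t val)
{
	asm volatile("wrmsr"
		     :: "c"(msr), "a"((uint32_t)val),
		        "d"((uint32_t)(val >> 32))
		     : "memory");
}

static void self_smi_sketch(void)
{
	/* One ICR write sends an SMI to the issuing vCPU itself. */
	wrmsr(X2APIC_ICR_MSR,
	      APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
}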
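The host-side inject_smi() above is a thin wrapper over the
KVM_GET/SET_VCPU_EVENTS ioctls; stripped of the selftest plumbing it is
roughly the following (vcpu_fd standing in for an open vCPU file
descriptor is an assumption of the sketch, and error checking is
elided):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static void inject_smi_raw(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	/* Read the current event state so unrelated fields survive. */
	ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events);

	/* Mark an SMI pending; VALID_SMM tells KVM the smi fields are live. */
	events.smi.pending = 1;
	events.flags |= KVM_VCPUEVENT_VALID_SMM;

	ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}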
Reviewed-by: Maxim Levitsky <mlevitsk@...hat.com>

Best regards,
	Maxim Levitsky
