Message-ID: <87y2pmsg8w.fsf@vitty.brq.redhat.com>
Date: Wed, 20 May 2020 18:50:07 +0200
From: Vitaly Kuznetsov <vkuznets@...hat.com>
To: Paolo Bonzini <pbonzini@...hat.com>, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org
Cc: oupton@...gle.com
Subject: Re: [PATCH 1/3] selftests: kvm: add a SVM version of state-test
Paolo Bonzini <pbonzini@...hat.com> writes:
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
> .../testing/selftests/kvm/x86_64/state_test.c | 65 ++++++++++++++++---
> 1 file changed, 55 insertions(+), 10 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
> index 5b1a016edf55..1c5216f1ef0a 100644
> --- a/tools/testing/selftests/kvm/x86_64/state_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/state_test.c
> @@ -18,10 +18,42 @@
> #include "kvm_util.h"
> #include "processor.h"
> #include "vmx.h"
> +#include "svm_util.h"
>
> #define VCPU_ID 5
> +#define L2_GUEST_STACK_SIZE 64
> +
> +void svm_l2_guest_code(void)
> +{
> + GUEST_SYNC(4);
> + /* Exit to L1 */
> + vmcall();
> + GUEST_SYNC(6);
> + /* Done, exit to L1 and never come back. */
> + vmcall();
> +}
> +
> +static void svm_l1_guest_code(struct svm_test_data *svm)
> +{
> + unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
> + struct vmcb *vmcb = svm->vmcb;
Nit: indentation
> +
> + GUEST_ASSERT(svm->vmcb_gpa);
> + /* Prepare for L2 execution. */
> + generic_svm_setup(svm, svm_l2_guest_code,
> + &l2_guest_stack[L2_GUEST_STACK_SIZE]);
> +
> + GUEST_SYNC(3);
> + run_guest(vmcb, svm->vmcb_gpa);
> + GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
> + GUEST_SYNC(5);
> + vmcb->save.rip += 3;
> + run_guest(vmcb, svm->vmcb_gpa);
> + GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
> + GUEST_SYNC(7);
> +}
>
> -void l2_guest_code(void)
> +void vmx_l2_guest_code(void)
> {
> GUEST_SYNC(6);
>
> @@ -42,9 +74,8 @@ void l2_guest_code(void)
> vmcall();
> }
>
> -void l1_guest_code(struct vmx_pages *vmx_pages)
> +static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
> {
> -#define L2_GUEST_STACK_SIZE 64
> unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
>
> GUEST_ASSERT(vmx_pages->vmcs_gpa);
> @@ -56,7 +87,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
> GUEST_SYNC(4);
> GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
>
> - prepare_vmcs(vmx_pages, l2_guest_code,
> + prepare_vmcs(vmx_pages, vmx_l2_guest_code,
> &l2_guest_stack[L2_GUEST_STACK_SIZE]);
>
> GUEST_SYNC(5);
> @@ -106,20 +137,31 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
> GUEST_ASSERT(vmresume());
> }
>
> -void guest_code(struct vmx_pages *vmx_pages)
> +static u32 cpuid_ecx(u32 eax)
> +{
> + u32 result;
> + asm volatile("cpuid" : "=c" (result) : "a" (eax));
Nit: doesn't CPUID clobber EBX/EDX too? (And ECX also works as an
input.) As written, the asm doesn't tell the compiler those registers
are overwritten. I'd suggest we write a correct implementation and put
it in the library (or find a way to reuse native_cpuid() from
arch/x86/include/asm/processor.h).
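
Something along these lines, modeled on native_cpuid() (just a sketch,
the helper names are illustrative and not an existing selftests API):

static inline void cpuid(u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
	/* CPUID writes all of EAX/EBX/ECX/EDX, so all four are outputs. */
	asm volatile("cpuid"
		     : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		     : "0" (*eax), "2" (*ecx)
		     : "memory");
}

static inline u32 cpuid_ecx(u32 function)
{
	u32 eax = function, ebx, ecx = 0, edx;

	cpuid(&eax, &ebx, &ecx, &edx);
	return ecx;
}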
> + return result;
> +}
> +
> +static void __attribute__((__flatten__)) guest_code(void *arg)
> {
> GUEST_SYNC(1);
> GUEST_SYNC(2);
>
> - if (vmx_pages)
> - l1_guest_code(vmx_pages);
> + if (arg) {
> + if (cpuid_ecx(0x80000001) & CPUID_SVM)
> + svm_l1_guest_code(arg);
> + else
> + vmx_l1_guest_code(arg);
> + }
>
> GUEST_DONE();
> }
>
> int main(int argc, char *argv[])
> {
> - vm_vaddr_t vmx_pages_gva = 0;
> + vm_vaddr_t nested_gva = 0;
>
> struct kvm_regs regs1, regs2;
> struct kvm_vm *vm;
> @@ -136,8 +178,11 @@ int main(int argc, char *argv[])
> vcpu_regs_get(vm, VCPU_ID, &regs1);
>
> if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
> - vcpu_alloc_vmx(vm, &vmx_pages_gva);
> - vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
> + if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
> + vcpu_alloc_svm(vm, &nested_gva);
> + else
> + vcpu_alloc_vmx(vm, &nested_gva);
> + vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
> } else {
> pr_info("will skip nested state checks\n");
> vcpu_args_set(vm, VCPU_ID, 1, 0);
With the two nitpicks above,
Reviewed-by: Vitaly Kuznetsov <vkuznets@...hat.com>
--
Vitaly