Message-Id: <20220504224914.1654036-72-seanjc@google.com>
Date: Wed, 4 May 2022 22:48:17 +0000
From: Sean Christopherson <seanjc@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Andrew Jones <drjones@...hat.com>,
David Matlack <dmatlack@...gle.com>,
Ben Gardon <bgardon@...gle.com>,
Oliver Upton <oupton@...gle.com>,
Sean Christopherson <seanjc@...gle.com>
Subject: [PATCH 071/128] KVM: selftests: Convert evmcs_test away from VCPU_ID
Convert evmcs_test to use vm_create_with_one_vcpu() and pass around a
'struct kvm_vcpu' object instead of using a global VCPU_ID. Note, this is
a "functional" change in the sense that the test now creates a vCPU with
vcpu_id==0 instead of vcpu_id==5. The non-zero VCPU_ID was 100% arbitrary
and added little to no validation coverage. If testing non-zero vCPU IDs
is desirable for generic tests, that can be done in the future by tweaking
the VM creation helpers.
Opportunistically use vcpu_run() instead of _vcpu_run(); the test expects
KVM_RUN to succeed.
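
For reference, the shape of the conversion (a minimal before/after sketch
drawn from the hunks below, not an additional change in this patch):

	/* Before: a hardcoded global vCPU ID is threaded through every helper. */
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	_vcpu_run(vm, VCPU_ID);

	/*
	 * After: VM creation hands back the vCPU object, whose ID is passed
	 * explicitly; vcpu_run() asserts that KVM_RUN itself succeeds.
	 */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	vcpu_run(vm, vcpu->id);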
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
.../testing/selftests/kvm/x86_64/evmcs_test.c | 51 ++++++++++---------
1 file changed, 26 insertions(+), 25 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index f97049ab045f..dc7c1eb28fd4 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -18,7 +18,6 @@
#include "vmx.h"
-#define VCPU_ID 5
#define NMI_VECTOR 2
static int ud_count;
@@ -160,55 +159,56 @@ void guest_code(struct vmx_pages *vmx_pages)
GUEST_DONE();
}
-void inject_nmi(struct kvm_vm *vm)
+void inject_nmi(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_events events;
- vcpu_events_get(vm, VCPU_ID, &events);
+ vcpu_events_get(vcpu->vm, vcpu->id, &events);
events.nmi.pending = 1;
events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
- vcpu_events_set(vm, VCPU_ID, &events);
+ vcpu_events_set(vcpu->vm, vcpu->id, &events);
}
-static void save_restore_vm(struct kvm_vm *vm)
+static struct kvm_vcpu *save_restore_vm(struct kvm_vm *vm,
+ struct kvm_vcpu *vcpu)
{
struct kvm_regs regs1, regs2;
struct kvm_x86_state *state;
- state = vcpu_save_state(vm, VCPU_ID);
+ state = vcpu_save_state(vm, vcpu->id);
memset(&regs1, 0, sizeof(regs1));
- vcpu_regs_get(vm, VCPU_ID, &regs1);
+ vcpu_regs_get(vm, vcpu->id, &regs1);
kvm_vm_release(vm);
/* Restore state in a new VM. */
- kvm_vm_restart(vm);
- vm_vcpu_add(vm, VCPU_ID);
- vcpu_set_hv_cpuid(vm, VCPU_ID);
- vcpu_enable_evmcs(vm, VCPU_ID);
- vcpu_load_state(vm, VCPU_ID, state);
+ vcpu = vm_recreate_with_one_vcpu(vm);
+ vcpu_set_hv_cpuid(vm, vcpu->id);
+ vcpu_enable_evmcs(vm, vcpu->id);
+ vcpu_load_state(vm, vcpu->id, state);
kvm_x86_state_cleanup(state);
memset(&regs2, 0, sizeof(regs2));
- vcpu_regs_get(vm, VCPU_ID, &regs2);
+ vcpu_regs_get(vm, vcpu->id, &regs2);
TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
+ return vcpu;
}
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva = 0;
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct ucall uc;
int stage;
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
if (!nested_vmx_supported() ||
!kvm_check_cap(KVM_CAP_NESTED_STATE) ||
@@ -217,28 +217,29 @@ int main(int argc, char *argv[])
exit(KSFT_SKIP);
}
- vcpu_set_hv_cpuid(vm, VCPU_ID);
- vcpu_enable_evmcs(vm, VCPU_ID);
+ vcpu_set_hv_cpuid(vm, vcpu->id);
+ vcpu_enable_evmcs(vm, vcpu->id);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+ vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vcpu_init_descriptor_tables(vm, vcpu->id);
vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
pr_info("Running L1 which uses EVMCS to run L2\n");
for (stage = 1;; stage++) {
- run = vcpu_state(vm, VCPU_ID);
- _vcpu_run(vm, VCPU_ID);
+ run = vcpu->run;
+
+ vcpu_run(vm, vcpu->id);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vm, vcpu->id, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
__FILE__, uc.args[1]);
@@ -256,12 +257,12 @@ int main(int argc, char *argv[])
uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
stage, (ulong)uc.args[1]);
- save_restore_vm(vm);
+ vcpu = save_restore_vm(vm, vcpu);
/* Force immediate L2->L1 exit before resuming */
if (stage == 8) {
pr_info("Injecting NMI into L1 before L2 had a chance to run after restore\n");
- inject_nmi(vm);
+ inject_nmi(vcpu);
}
/*
@@ -271,7 +272,7 @@ int main(int argc, char *argv[])
*/
if (stage == 9) {
pr_info("Trying extra KVM_GET_NESTED_STATE/KVM_SET_NESTED_STATE cycle\n");
- save_restore_vm(vm);
+ vcpu = save_restore_vm(vm, vcpu);
}
}
--
2.36.0.464.gb9c8b46e94-goog