Message-ID: <20250228093024.114983-23-Neeraj.Upadhyay@amd.com>
Date: Fri, 28 Feb 2025 15:00:15 +0530
From: Neeraj Upadhyay <Neeraj.Upadhyay@....com>
To: <kvm@...r.kernel.org>, <seanjc@...gle.com>, <pbonzini@...hat.com>
CC: <linux-kernel@...r.kernel.org>, <Thomas.Lendacky@....com>,
<nikunj@....com>, <Santosh.Shukla@....com>, <Vasant.Hegde@....com>,
<Suravee.Suthikulpanit@....com>, <bp@...en8.de>, <David.Kaplan@....com>,
<huibo.wang@....com>, <naveen.rao@....com>, <pgonda@...gle.com>,
<linux-kselftest@...r.kernel.org>, <shuah@...nel.org>
Subject: [RFC PATCH 22/31] KVM: selftests: Add args param to kvm_arch_vm_post_create()
Add a provision to pass custom args to kvm_arch_vm_post_create().
This will be used to pass SEV init args (VMSA features) for SEV VMs.
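
For example, a test could request specific VMSA features at VM creation
time along these lines (a minimal sketch; SVM_FEAT_SECURE_AVIC is the
feature bit introduced later in this series and is used here only for
illustration):

	/* Build a custom KVM_SEV_INIT2 argument with extra VMSA features. */
	struct kvm_sev_init init = {
		.vmsa_features = BIT_ULL(SVM_FEAT_SECURE_AVIC),
	};
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* init is forwarded via kvm_arch_vm_post_create() to KVM_SEV_INIT2. */
	vm = _vm_sev_create_with_one_vcpu(KVM_X86_SNP_VM, guest_code, &vcpu, &init);

Callers that do not need custom init args keep using the existing
wrappers, which simply pass NULL.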
Signed-off-by: Neeraj Upadhyay <Neeraj.Upadhyay@....com>
---
.../testing/selftests/kvm/include/kvm_util.h | 9 +++-
tools/testing/selftests/kvm/include/x86/sev.h | 3 ++
tools/testing/selftests/kvm/lib/kvm_util.c | 51 +++++++++++++------
.../testing/selftests/kvm/lib/x86/processor.c | 6 ++-
tools/testing/selftests/kvm/lib/x86/sev.c | 13 +++--
tools/testing/selftests/kvm/s390/cmma_test.c | 2 +-
6 files changed, 62 insertions(+), 22 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index a160e1ac7cbc..7f97bade5797 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -901,6 +901,9 @@ static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
struct kvm_vm *____vm_create(struct vm_shape shape);
struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
uint64_t nr_extra_pages);
+struct kvm_vm *__vm_create_with_args(struct vm_shape shape,
+ uint32_t nr_runnable_vcpus, uint64_t nr_extra_pages,
+ void *args);
static inline struct kvm_vm *vm_create_barebones(void)
{
@@ -925,6 +928,10 @@ static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
uint64_t extra_mem_pages,
void *guest_code, struct kvm_vcpu *vcpus[]);
+struct kvm_vm *___vm_create_with_vcpus(struct vm_shape shape,
+ uint32_t nr_vcpus, uint64_t extra_mem_pages,
+ void *guest_code, struct kvm_vcpu *vcpus[],
+ void *args);
static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
void *guest_code,
@@ -1141,7 +1148,7 @@ static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
*/
void kvm_selftest_arch_init(void);
-void kvm_arch_vm_post_create(struct kvm_vm *vm);
+void kvm_arch_vm_post_create(struct kvm_vm *vm, void *args);
bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);
diff --git a/tools/testing/selftests/kvm/include/x86/sev.h b/tools/testing/selftests/kvm/include/x86/sev.h
index 3756805197c3..ffb5ded0a35a 100644
--- a/tools/testing/selftests/kvm/include/x86/sev.h
+++ b/tools/testing/selftests/kvm/include/x86/sev.h
@@ -52,6 +52,9 @@ void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy);
void snp_vm_launch_update(struct kvm_vm *vm);
void snp_vm_launch_finish(struct kvm_vm *vm);
+struct kvm_vm *_vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+ struct kvm_vcpu **cpu,
+ void *init_args);
struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
struct kvm_vcpu **cpu);
void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 34e586d4fec4..93b8e2ccc7b3 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -408,8 +408,8 @@ static uint64_t vm_nr_pages_required(struct vm_shape shape,
return vm_adjust_num_guest_pages(shape.mode, nr_pages);
}
-struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
- uint64_t nr_extra_pages)
+static struct kvm_vm *___vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
+ uint64_t nr_extra_pages, void *args)
{
uint64_t nr_pages = vm_nr_pages_required(shape, nr_runnable_vcpus,
nr_extra_pages);
@@ -447,7 +447,37 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
guest_rng = new_guest_random_state(guest_random_seed);
sync_global_to_guest(vm, guest_rng);
- kvm_arch_vm_post_create(vm);
+ kvm_arch_vm_post_create(vm, args);
+
+ return vm;
+}
+
+struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
+ uint64_t nr_extra_pages)
+{
+ return ___vm_create(shape, nr_runnable_vcpus, nr_extra_pages, NULL);
+}
+
+struct kvm_vm *__vm_create_with_args(struct vm_shape shape, uint32_t nr_runnable_vcpus,
+ uint64_t nr_extra_pages, void *args)
+{
+ return ___vm_create(shape, nr_runnable_vcpus, nr_extra_pages, args);
+}
+
+struct kvm_vm *___vm_create_with_vcpus(struct vm_shape shape,
+ uint32_t nr_vcpus, uint64_t extra_mem_pages,
+ void *guest_code, struct kvm_vcpu *vcpus[],
+ void *args)
+{
+ struct kvm_vm *vm;
+ int i;
+
+ TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");
+
+ vm = ___vm_create(shape, nr_vcpus, extra_mem_pages, args);
+
+ for (i = 0; i < nr_vcpus; ++i)
+ vcpus[i] = vm_vcpu_add(vm, i, guest_code);
return vm;
}
@@ -475,17 +505,8 @@ struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
uint64_t extra_mem_pages,
void *guest_code, struct kvm_vcpu *vcpus[])
{
- struct kvm_vm *vm;
- int i;
-
- TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");
-
- vm = __vm_create(shape, nr_vcpus, extra_mem_pages);
-
- for (i = 0; i < nr_vcpus; ++i)
- vcpus[i] = vm_vcpu_add(vm, i, guest_code);
-
- return vm;
+ return ___vm_create_with_vcpus(shape, nr_vcpus, extra_mem_pages, guest_code,
+ vcpus, NULL);
}
struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
@@ -2270,7 +2291,7 @@ void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
}
}
-__weak void kvm_arch_vm_post_create(struct kvm_vm *vm)
+__weak void kvm_arch_vm_post_create(struct kvm_vm *vm, void *args)
{
}
diff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c
index 2d6105b1f610..09474be27986 100644
--- a/tools/testing/selftests/kvm/lib/x86/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86/processor.c
@@ -9,6 +9,7 @@
#include "processor.h"
#include "sev.h"
#include "apic.h"
+#include "savic.h"
#ifndef NUM_INTERRUPTS
#define NUM_INTERRUPTS 256
@@ -631,7 +632,7 @@ void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
REPORT_GUEST_ASSERT(uc);
}
-void kvm_arch_vm_post_create(struct kvm_vm *vm)
+void kvm_arch_vm_post_create(struct kvm_vm *vm, void *sev_init_args)
{
int r;
@@ -648,7 +649,8 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm)
if (is_sev_vm(vm)) {
struct kvm_sev_init init = { 0 };
- vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
+ vm_sev_ioctl(vm, KVM_SEV_INIT2, sev_init_args ?
+ (struct kvm_sev_init *)sev_init_args : &init);
}
r = __vm_ioctl(vm, KVM_GET_TSC_KHZ, NULL);
diff --git a/tools/testing/selftests/kvm/lib/x86/sev.c b/tools/testing/selftests/kvm/lib/x86/sev.c
index 518e30275960..7675950efe56 100644
--- a/tools/testing/selftests/kvm/lib/x86/sev.c
+++ b/tools/testing/selftests/kvm/lib/x86/sev.c
@@ -291,8 +291,9 @@ void snp_vm_launch_finish(struct kvm_vm *vm)
vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish);
}
-struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
- struct kvm_vcpu **cpu)
+struct kvm_vm *_vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+ struct kvm_vcpu **cpu,
+ void *init_args)
{
struct vm_shape shape = {
.mode = VM_MODE_DEFAULT,
@@ -301,7 +302,7 @@ struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
struct kvm_vm *vm;
struct kvm_vcpu *cpus[1];
- vm = __vm_create_with_vcpus(shape, 1, 0, guest_code, cpus);
+ vm = ___vm_create_with_vcpus(shape, 1, 0, guest_code, cpus, init_args);
*cpu = cpus[0];
return vm;
@@ -319,6 +320,12 @@ static bool is_savic_enabled(void)
return supported_vmsa_features & BIT_ULL(SVM_FEAT_SECURE_AVIC);
}
+struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+ struct kvm_vcpu **cpu)
+{
+ return _vm_sev_create_with_one_vcpu(type, guest_code, cpu, NULL);
+}
+
void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement)
{
if (is_sev_es_vm(vm))
diff --git a/tools/testing/selftests/kvm/s390/cmma_test.c b/tools/testing/selftests/kvm/s390/cmma_test.c
index e32dd59703a0..b6a3fa1d71aa 100644
--- a/tools/testing/selftests/kvm/s390/cmma_test.c
+++ b/tools/testing/selftests/kvm/s390/cmma_test.c
@@ -145,7 +145,7 @@ static void finish_vm_setup(struct kvm_vm *vm)
slot0 = memslot2region(vm, 0);
ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);
- kvm_arch_vm_post_create(vm);
+ kvm_arch_vm_post_create(vm, NULL);
}
static struct kvm_vm *create_vm_two_memslots(void)
--
2.34.1