Message-ID: <20250923050942.206116-28-Neeraj.Upadhyay@amd.com>
Date: Tue, 23 Sep 2025 10:39:34 +0530
From: Neeraj Upadhyay <Neeraj.Upadhyay@....com>
To: <kvm@...r.kernel.org>, <seanjc@...gle.com>, <pbonzini@...hat.com>
CC: <linux-kernel@...r.kernel.org>, <Thomas.Lendacky@....com>,
	<nikunj@....com>, <Santosh.Shukla@....com>, <Vasant.Hegde@....com>,
	<Suravee.Suthikulpanit@....com>, <bp@...en8.de>, <David.Kaplan@....com>,
	<huibo.wang@....com>, <naveen.rao@....com>, <pgonda@...gle.com>,
	<linux-kselftest@...r.kernel.org>, <shuah@...nel.org>, <tiala@...rosoft.com>
Subject: [RFC PATCH v2 27/35] KVM: selftests: Add args parameter to kvm_arch_vm_post_create()

The kvm_arch_vm_post_create() hook allows architecture-specific setup to be
performed immediately after a VM is created. However, it currently provides
no way to pass custom configuration data from the test down to this setup
phase.

This is a limitation for features that require non-default initialization.
For example, to test SEV-SNP with Secure AVIC (SAVIC), the desired VMSA
features must be configured via the KVM_SEV_INIT2 ioctl. This ioctl is
called within the x86 implementation of kvm_arch_vm_post_create(), but it
currently uses hardcoded default values.

To make this configurable, extend kvm_arch_vm_post_create() with a generic
"void *args" parameter. The x86 implementation now uses this argument, when
non-NULL, as the "struct kvm_sev_init" passed to the KVM_SEV_INIT2 ioctl,
and falls back to the default zero-initialized struct otherwise.

This refactoring makes the VM creation process more flexible and is a
prerequisite for adding selftests for Secure AVIC guests.

Signed-off-by: Neeraj Upadhyay <Neeraj.Upadhyay@....com>
---
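As an illustrative sketch (not part of this patch; the actual Secure AVIC
tests arrive later in the series), a caller could hand non-default VMSA
features down to KVM_SEV_INIT2 roughly like this, assuming an SNP VM type
and a placeholder guest_code entry point:

	struct kvm_sev_init init = {
		.vmsa_features = BIT_ULL(SVM_FEAT_SECURE_AVIC),
	};
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/*
	 * "init" is forwarded as the new args pointer and ends up as the
	 * KVM_SEV_INIT2 payload in the x86 kvm_arch_vm_post_create().
	 */
	vm = _vm_sev_create_with_one_vcpu(KVM_X86_SNP_VM, guest_code,
					  &vcpu, &init);
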
 .../testing/selftests/kvm/arm64/set_id_regs.c |  2 +-
 .../testing/selftests/kvm/include/kvm_util.h  |  9 +++-
 tools/testing/selftests/kvm/include/x86/sev.h |  3 ++
 tools/testing/selftests/kvm/lib/kvm_util.c    | 51 +++++++++++++------
 .../testing/selftests/kvm/lib/x86/processor.c |  6 ++-
 tools/testing/selftests/kvm/lib/x86/sev.c     | 13 +++--
 tools/testing/selftests/kvm/s390/cmma_test.c  |  2 +-
 7 files changed, 63 insertions(+), 23 deletions(-)

diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c
index d3bf9204409c..b732ee9efbed 100644
--- a/tools/testing/selftests/kvm/arm64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c
@@ -749,7 +749,7 @@ static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
 	ksft_test_result_pass("%s\n", __func__);
 }
 
-void kvm_arch_vm_post_create(struct kvm_vm *vm)
+void kvm_arch_vm_post_create(struct kvm_vm *vm, void *args)
 {
 	if (vm_check_cap(vm, KVM_CAP_ARM_MTE)) {
 		vm_enable_cap(vm, KVM_CAP_ARM_MTE, 0);
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 4a4f9621082d..e5f322994f44 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -992,6 +992,9 @@ static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 struct kvm_vm *____vm_create(struct vm_shape shape);
 struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
 			   uint64_t nr_extra_pages);
+struct kvm_vm *__vm_create_with_args(struct vm_shape shape,
+		uint32_t nr_runnable_vcpus,  uint64_t nr_extra_pages,
+		void *args);
 
 static inline struct kvm_vm *vm_create_barebones(void)
 {
@@ -1016,6 +1019,10 @@ static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
 struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
 				      uint64_t extra_mem_pages,
 				      void *guest_code, struct kvm_vcpu *vcpus[]);
+struct kvm_vm *___vm_create_with_vcpus(struct vm_shape shape,
+				       uint32_t nr_vcpus, uint64_t extra_mem_pages,
+				       void *guest_code, struct kvm_vcpu *vcpus[],
+				       void *args);
 
 static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
 						  void *guest_code,
@@ -1261,7 +1268,7 @@ static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
  */
 void kvm_selftest_arch_init(void);
 
-void kvm_arch_vm_post_create(struct kvm_vm *vm);
+void kvm_arch_vm_post_create(struct kvm_vm *vm, void *args);
 
 bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);
 
diff --git a/tools/testing/selftests/kvm/include/x86/sev.h b/tools/testing/selftests/kvm/include/x86/sev.h
index d9794f5c2c16..73a23043d6c5 100644
--- a/tools/testing/selftests/kvm/include/x86/sev.h
+++ b/tools/testing/selftests/kvm/include/x86/sev.h
@@ -72,6 +72,9 @@ void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy);
 void snp_vm_launch_update(struct kvm_vm *vm);
 void snp_vm_launch_finish(struct kvm_vm *vm);
 
+struct kvm_vm *_vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+					   struct kvm_vcpu **cpu,
+					   void *init_args);
 struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
 					   struct kvm_vcpu **cpu);
 void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 360f262f5f3f..23272f797f5f 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -473,8 +473,8 @@ static bool is_guest_memfd_required(struct vm_shape shape)
 #endif
 }
 
-struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
-			   uint64_t nr_extra_pages)
+static struct kvm_vm *___vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
+			   uint64_t nr_extra_pages, void *args)
 {
 	uint64_t nr_pages = vm_nr_pages_required(shape, nr_runnable_vcpus,
 						 nr_extra_pages);
@@ -519,7 +519,37 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
 	guest_rng = new_guest_random_state(guest_random_seed);
 	sync_global_to_guest(vm, guest_rng);
 
-	kvm_arch_vm_post_create(vm);
+	kvm_arch_vm_post_create(vm, args);
+
+	return vm;
+}
+
+struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
+			   uint64_t nr_extra_pages)
+{
+	return ___vm_create(shape, nr_runnable_vcpus, nr_extra_pages, NULL);
+}
+
+struct kvm_vm *__vm_create_with_args(struct vm_shape shape, uint32_t nr_runnable_vcpus,
+			   uint64_t nr_extra_pages, void *args)
+{
+	return ___vm_create(shape, nr_runnable_vcpus, nr_extra_pages, args);
+}
+
+struct kvm_vm *___vm_create_with_vcpus(struct vm_shape shape,
+		uint32_t nr_vcpus, uint64_t extra_mem_pages,
+		void *guest_code, struct kvm_vcpu *vcpus[],
+		void *args)
+{
+	struct kvm_vm *vm;
+	int i;
+
+	TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");
+
+	vm = ___vm_create(shape, nr_vcpus, extra_mem_pages, args);
+
+	for (i = 0; i < nr_vcpus; ++i)
+		vcpus[i] = vm_vcpu_add(vm, i, guest_code);
 
 	return vm;
 }
@@ -547,17 +577,8 @@ struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
 				      uint64_t extra_mem_pages,
 				      void *guest_code, struct kvm_vcpu *vcpus[])
 {
-	struct kvm_vm *vm;
-	int i;
-
-	TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");
-
-	vm = __vm_create(shape, nr_vcpus, extra_mem_pages);
-
-	for (i = 0; i < nr_vcpus; ++i)
-		vcpus[i] = vm_vcpu_add(vm, i, guest_code);
-
-	return vm;
+	return ___vm_create_with_vcpus(shape, nr_vcpus, extra_mem_pages, guest_code,
+			vcpus, NULL);
 }
 
 struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
@@ -2357,7 +2378,7 @@ void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
 	TEST_FAIL("Unable to find stat '%s'", name);
 }
 
-__weak void kvm_arch_vm_post_create(struct kvm_vm *vm)
+__weak void kvm_arch_vm_post_create(struct kvm_vm *vm, void *args)
 {
 }
 
diff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c
index a33a09a161d3..fc57b948c041 100644
--- a/tools/testing/selftests/kvm/lib/x86/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86/processor.c
@@ -9,6 +9,7 @@
 #include "processor.h"
 #include "sev.h"
 #include "apic.h"
+#include "savic.h"
 
 #ifndef NUM_INTERRUPTS
 #define NUM_INTERRUPTS 256
@@ -631,7 +632,7 @@ void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
 		REPORT_GUEST_ASSERT(uc);
 }
 
-void kvm_arch_vm_post_create(struct kvm_vm *vm)
+void kvm_arch_vm_post_create(struct kvm_vm *vm, void *sev_init_args)
 {
 	int r;
 
@@ -648,7 +649,8 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm)
 	if (is_sev_vm(vm)) {
 		struct kvm_sev_init init = { 0 };
 
-		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
+		vm_sev_ioctl(vm, KVM_SEV_INIT2, sev_init_args ?
+				(struct kvm_sev_init *)sev_init_args : &init);
 	}
 
 	r = __vm_ioctl(vm, KVM_GET_TSC_KHZ, NULL);
diff --git a/tools/testing/selftests/kvm/lib/x86/sev.c b/tools/testing/selftests/kvm/lib/x86/sev.c
index 113f33ca40b2..257988fce107 100644
--- a/tools/testing/selftests/kvm/lib/x86/sev.c
+++ b/tools/testing/selftests/kvm/lib/x86/sev.c
@@ -260,8 +260,9 @@ void snp_vm_launch_finish(struct kvm_vm *vm)
 	vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish);
 }
 
-struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
-					   struct kvm_vcpu **cpu)
+struct kvm_vm *_vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+					   struct kvm_vcpu **cpu,
+					   void *init_args)
 {
 	struct vm_shape shape = {
 		.mode = VM_MODE_DEFAULT,
@@ -270,7 +271,7 @@ struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
 	struct kvm_vm *vm;
 	struct kvm_vcpu *cpus[1];
 
-	vm = __vm_create_with_vcpus(shape, 1, 0, guest_code, cpus);
+	vm = ___vm_create_with_vcpus(shape, 1, 0, guest_code, cpus, init_args);
 	*cpu = cpus[0];
 
 	return vm;
@@ -288,6 +289,12 @@ static bool is_savic_enabled(void)
 	return supported_vmsa_features & BIT_ULL(SVM_FEAT_SECURE_AVIC);
 }
 
+struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+					   struct kvm_vcpu **cpu)
+{
+	return _vm_sev_create_with_one_vcpu(type, guest_code, cpu, NULL);
+}
+
 void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement)
 {
 	if (is_sev_es_vm(vm))
diff --git a/tools/testing/selftests/kvm/s390/cmma_test.c b/tools/testing/selftests/kvm/s390/cmma_test.c
index 85cc8c18d6e7..0476dace3473 100644
--- a/tools/testing/selftests/kvm/s390/cmma_test.c
+++ b/tools/testing/selftests/kvm/s390/cmma_test.c
@@ -145,7 +145,7 @@ static void finish_vm_setup(struct kvm_vm *vm)
 	slot0 = memslot2region(vm, 0);
 	ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);
 
-	kvm_arch_vm_post_create(vm);
+	kvm_arch_vm_post_create(vm, NULL);
 }
 
 static struct kvm_vm *create_vm_two_memslots(void)
-- 
2.34.1

