[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <45a932753580d21627779ccfc1a2400e17dfdd79.1747264138.git.ackerleytng@google.com>
Date: Wed, 14 May 2025 16:41:53 -0700
From: Ackerley Tng <ackerleytng@...gle.com>
To: kvm@...r.kernel.org, linux-mm@...ck.org, linux-kernel@...r.kernel.org,
x86@...nel.org, linux-fsdevel@...r.kernel.org
Cc: ackerleytng@...gle.com, aik@....com, ajones@...tanamicro.com,
akpm@...ux-foundation.org, amoorthy@...gle.com, anthony.yznaga@...cle.com,
anup@...infault.org, aou@...s.berkeley.edu, bfoster@...hat.com,
binbin.wu@...ux.intel.com, brauner@...nel.org, catalin.marinas@....com,
chao.p.peng@...el.com, chenhuacai@...nel.org, dave.hansen@...el.com,
david@...hat.com, dmatlack@...gle.com, dwmw@...zon.co.uk,
erdemaktas@...gle.com, fan.du@...el.com, fvdl@...gle.com, graf@...zon.com,
haibo1.xu@...el.com, hch@...radead.org, hughd@...gle.com, ira.weiny@...el.com,
isaku.yamahata@...el.com, jack@...e.cz, james.morse@....com,
jarkko@...nel.org, jgg@...pe.ca, jgowans@...zon.com, jhubbard@...dia.com,
jroedel@...e.de, jthoughton@...gle.com, jun.miao@...el.com,
kai.huang@...el.com, keirf@...gle.com, kent.overstreet@...ux.dev,
kirill.shutemov@...el.com, liam.merwick@...cle.com,
maciej.wieczor-retman@...el.com, mail@...iej.szmigiero.name, maz@...nel.org,
mic@...ikod.net, michael.roth@....com, mpe@...erman.id.au,
muchun.song@...ux.dev, nikunj@....com, nsaenz@...zon.es,
oliver.upton@...ux.dev, palmer@...belt.com, pankaj.gupta@....com,
paul.walmsley@...ive.com, pbonzini@...hat.com, pdurrant@...zon.co.uk,
peterx@...hat.com, pgonda@...gle.com, pvorel@...e.cz, qperret@...gle.com,
quic_cvanscha@...cinc.com, quic_eberman@...cinc.com,
quic_mnalajal@...cinc.com, quic_pderrin@...cinc.com, quic_pheragu@...cinc.com,
quic_svaddagi@...cinc.com, quic_tsoni@...cinc.com, richard.weiyang@...il.com,
rick.p.edgecombe@...el.com, rientjes@...gle.com, roypat@...zon.co.uk,
rppt@...nel.org, seanjc@...gle.com, shuah@...nel.org, steven.price@....com,
steven.sistare@...cle.com, suzuki.poulose@....com, tabba@...gle.com,
thomas.lendacky@....com, usama.arif@...edance.com, vannapurve@...gle.com,
vbabka@...e.cz, viro@...iv.linux.org.uk, vkuznets@...hat.com,
wei.w.wang@...el.com, will@...nel.org, willy@...radead.org,
xiaoyao.li@...el.com, yan.y.zhao@...el.com, yilun.xu@...el.com,
yuzenghui@...wei.com, zhiquan1.li@...el.com
Subject: [RFC PATCH v2 14/51] KVM: selftests: Update private_mem_conversions_test
to mmap guest_memfd
Update private_mem_conversions_test to use guest_memfd to back both
private and shared memory. Use the guest_memfd conversion ioctls to
perform private/shared conversions.
Specify -g to also back shared memory with memory from guest_memfd.
Signed-off-by: Ackerley Tng <ackerleytng@...gle.com>
Change-Id: Ibc647dc43fbdddac7cc465886bed92c07bbf4f00
---
.../testing/selftests/kvm/include/kvm_util.h | 1 +
tools/testing/selftests/kvm/lib/kvm_util.c | 36 ++++
.../kvm/x86/private_mem_conversions_test.c | 163 +++++++++++++++---
3 files changed, 176 insertions(+), 24 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index ffe0625f2d71..ded65a15abea 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -721,6 +721,7 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
+int addr_gpa2guest_memfd(struct kvm_vm *vm, vm_paddr_t gpa, loff_t *offset);
#ifndef vcpu_arch_put_guest
#define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 58a3365f479c..253d0c00e2f0 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1734,6 +1734,42 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
+ (gpa - region->region.guest_phys_addr));
}
+/*
+ * Address VM Physical to guest_memfd
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * gpa - VM physical address
+ *
+ * Output Args:
+ * offset - offset in guest_memfd for gpa
+ *
+ * Return:
+ * The file descriptor of the guest_memfd backing the requested gpa
+ *
+ * Locates the memory region containing the VM physical address given by gpa,
+ * within the VM given by vm. When found, the guest_memfd providing the memory
+ * to the vm physical address and the offset in the file corresponding to the
+ * requested gpa is returned. A TEST_ASSERT failure occurs if no region
+ * containing gpa exists.
+ */
+int addr_gpa2guest_memfd(struct kvm_vm *vm, vm_paddr_t gpa, loff_t *offset)
+{
+ struct userspace_mem_region *region;
+
+ /* Strip arch-specific tag bits first — presumably required so the
+ * region lookup below sees the raw physical address; confirm against
+ * vm_untag_gpa()'s contract. */
+ gpa = vm_untag_gpa(vm, gpa);
+
+ region = userspace_mem_region_find(vm, gpa, gpa);
+ if (!region) {
+ TEST_FAIL("No vm physical memory at 0x%lx", gpa);
+ /* Only reached if TEST_FAIL() ever returns; keeps the error
+ * path explicit for callers checking for a negative fd. */
+ return -1;
+ }
+
+ /* Translate gpa to its offset within the backing guest_memfd:
+ * region's base file offset plus gpa's offset into the region. */
+ *offset = region->region.guest_memfd_offset + gpa - region->region.guest_phys_addr;
+
+ return region->region.guest_memfd;
+}
+
/*
* Address Host Virtual to VM Physical
*
diff --git a/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c b/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
index 82a8d88b5338..ec20bb7e95c8 100644
--- a/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
+++ b/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
@@ -11,6 +11,7 @@
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
+#include <sys/wait.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
@@ -202,15 +203,19 @@ static void guest_test_explicit_conversion(uint64_t base_gpa, bool do_fallocate)
guest_sync_shared(gpa, size, p3, p4);
memcmp_g(gpa, p4, size);
- /* Reset the shared memory back to the initial pattern. */
- memset((void *)gpa, init_p, size);
-
/*
* Free (via PUNCH_HOLE) *all* private memory so that the next
* iteration starts from a clean slate, e.g. with respect to
* whether or not there are pages/folios in guest_mem.
*/
guest_map_shared(base_gpa, PER_CPU_DATA_SIZE, true);
+
+ /*
+ * Reset the entire block back to the initial pattern. Do this
+ * after fallocate(PUNCH_HOLE) because hole-punching zeroes
+ * memory.
+ */
+ memset((void *)base_gpa, init_p, PER_CPU_DATA_SIZE);
}
}
@@ -286,7 +291,8 @@ static void guest_code(uint64_t base_gpa)
GUEST_DONE();
}
-static void handle_exit_hypercall(struct kvm_vcpu *vcpu)
+static void handle_exit_hypercall(struct kvm_vcpu *vcpu,
+ bool back_shared_memory_with_guest_memfd)
{
struct kvm_run *run = vcpu->run;
uint64_t gpa = run->hypercall.args[0];
@@ -303,17 +309,81 @@ static void handle_exit_hypercall(struct kvm_vcpu *vcpu)
if (do_fallocate)
vm_guest_mem_fallocate(vm, gpa, size, map_shared);
- if (set_attributes)
- vm_set_memory_attributes(vm, gpa, size,
- map_shared ? 0 : KVM_MEMORY_ATTRIBUTE_PRIVATE);
+ if (set_attributes) {
+ if (back_shared_memory_with_guest_memfd) {
+ loff_t offset;
+ int guest_memfd;
+
+ guest_memfd = addr_gpa2guest_memfd(vm, gpa, &offset);
+
+ if (map_shared)
+ guest_memfd_convert_shared(guest_memfd, offset, size);
+ else
+ guest_memfd_convert_private(guest_memfd, offset, size);
+ } else {
+ uint64_t attrs;
+
+ attrs = map_shared ? 0 : KVM_MEMORY_ATTRIBUTE_PRIVATE;
+ vm_set_memory_attributes(vm, gpa, size, attrs);
+ }
+ }
run->hypercall.ret = 0;
}
+/*
+ * Assert that writing to @address faults with SIGBUS.
+ *
+ * The write is attempted in a forked child so that the expected crash does
+ * not take down the test itself; the parent verifies that the child was
+ * terminated by SIGBUS rather than exiting normally.
+ */
+static void assert_not_faultable(uint8_t *address)
+{
+ pid_t child_pid;
+
+ child_pid = fork();
+ TEST_ASSERT(child_pid != -1, "fork failed");
+
+ if (child_pid == 0) {
+ /* Expected to die with SIGBUS on this write. */
+ *address = 'A';
+ TEST_FAIL("Child should have exited with a signal");
+ } else {
+ int status;
+
+ /*
+ * Check waitpid()'s result: on failure (e.g. EINTR) @status
+ * would be left uninitialized and WIFSIGNALED() below would
+ * read indeterminate memory.
+ */
+ TEST_ASSERT(waitpid(child_pid, &status, 0) == child_pid,
+ "waitpid failed");
+
+ TEST_ASSERT(WIFSIGNALED(status),
+ "Child should have exited with a signal");
+ TEST_ASSERT_EQ(WTERMSIG(status), SIGBUS);
+ }
+}
+
+/*
+ * Add a KVM_MEM_GUEST_MEMFD memslot whose host userspace mapping is an
+ * mmap() of the guest_memfd itself, i.e. shared memory is backed by the
+ * same guest_memfd that backs private memory.
+ */
+static void add_memslot(struct kvm_vm *vm, uint64_t gpa, uint32_t slot,
+ uint64_t size, int guest_memfd,
+ uint64_t guest_memfd_offset)
+{
+ struct userspace_mem_region *region;
+
+ region = vm_mem_region_alloc(vm);
+
+ /* NOTE(review): the helper returns the fd to use from here on —
+ * presumably a dup owned by the region; confirm its contract. */
+ guest_memfd = vm_mem_region_install_guest_memfd(region, guest_memfd);
+
+ /* Map the guest_memfd range MAP_SHARED as the host view of the slot. */
+ vm_mem_region_mmap(region, size, MAP_SHARED, guest_memfd, guest_memfd_offset);
+ vm_mem_region_install_memory(region, size, getpagesize());
+
+ region->region.slot = slot;
+ region->region.flags = KVM_MEM_GUEST_MEMFD;
+ region->region.guest_phys_addr = gpa;
+ region->region.guest_memfd_offset = guest_memfd_offset;
+
+ vm_mem_region_add(vm, region);
+}
+
static bool run_vcpus;
-static void *__test_mem_conversions(void *__vcpu)
+struct test_thread_args
{
- struct kvm_vcpu *vcpu = __vcpu;
+ struct kvm_vcpu *vcpu;
+ bool back_shared_memory_with_guest_memfd;
+};
+
+static void *__test_mem_conversions(void *params)
+{
+ struct test_thread_args *args = params;
+ struct kvm_vcpu *vcpu = args->vcpu;
struct kvm_run *run = vcpu->run;
struct kvm_vm *vm = vcpu->vm;
struct ucall uc;
@@ -325,7 +395,10 @@ static void *__test_mem_conversions(void *__vcpu)
vcpu_run(vcpu);
if (run->exit_reason == KVM_EXIT_HYPERCALL) {
- handle_exit_hypercall(vcpu);
+ handle_exit_hypercall(
+ vcpu,
+ args->back_shared_memory_with_guest_memfd);
+
continue;
}
@@ -349,8 +422,18 @@ static void *__test_mem_conversions(void *__vcpu)
size_t nr_bytes = min_t(size_t, vm->page_size, size - i);
uint8_t *hva = addr_gpa2hva(vm, gpa + i);
- /* In all cases, the host should observe the shared data. */
- memcmp_h(hva, gpa + i, uc.args[3], nr_bytes);
+ /* Check contents of memory */
+ if (args->back_shared_memory_with_guest_memfd &&
+ uc.args[0] == SYNC_PRIVATE) {
+ assert_not_faultable(hva);
+ } else {
+ /*
+ * If shared and private memory use
+ * separate backing memory, the host
+ * should always observe shared data.
+ */
+ memcmp_h(hva, gpa + i, uc.args[3], nr_bytes);
+ }
/* For shared, write the new pattern to guest memory. */
if (uc.args[0] == SYNC_SHARED)
@@ -366,14 +449,16 @@ static void *__test_mem_conversions(void *__vcpu)
}
}
-static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t nr_vcpus,
- uint32_t nr_memslots)
+static void test_mem_conversions(enum vm_mem_backing_src_type src_type,
+ uint32_t nr_vcpus, uint32_t nr_memslots,
+ bool back_shared_memory_with_guest_memfd)
{
/*
* Allocate enough memory so that each vCPU's chunk of memory can be
* naturally aligned with respect to the size of the backing store.
*/
const size_t alignment = max_t(size_t, SZ_2M, get_backing_src_pagesz(src_type));
+ struct test_thread_args *thread_args[KVM_MAX_VCPUS];
const size_t per_cpu_size = align_up(PER_CPU_DATA_SIZE, alignment);
const size_t memfd_size = per_cpu_size * nr_vcpus;
const size_t slot_size = memfd_size / nr_memslots;
@@ -381,6 +466,7 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t
pthread_t threads[KVM_MAX_VCPUS];
struct kvm_vm *vm;
int memfd, i, r;
+ uint64_t flags;
const struct vm_shape shape = {
.mode = VM_MODE_DEFAULT,
@@ -394,12 +480,23 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t
vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, (1 << KVM_HC_MAP_GPA_RANGE));
- memfd = vm_create_guest_memfd(vm, memfd_size, 0);
+ flags = back_shared_memory_with_guest_memfd ?
+ GUEST_MEMFD_FLAG_SUPPORT_SHARED :
+ 0;
+ memfd = vm_create_guest_memfd(vm, memfd_size, flags);
- for (i = 0; i < nr_memslots; i++)
- vm_mem_add(vm, src_type, BASE_DATA_GPA + slot_size * i,
- BASE_DATA_SLOT + i, slot_size / vm->page_size,
- KVM_MEM_GUEST_MEMFD, memfd, slot_size * i);
+ for (i = 0; i < nr_memslots; i++) {
+ if (back_shared_memory_with_guest_memfd) {
+ add_memslot(vm, BASE_DATA_GPA + slot_size * i,
+ BASE_DATA_SLOT + i, slot_size, memfd,
+ slot_size * i);
+ } else {
+ vm_mem_add(vm, src_type, BASE_DATA_GPA + slot_size * i,
+ BASE_DATA_SLOT + i,
+ slot_size / vm->page_size,
+ KVM_MEM_GUEST_MEMFD, memfd, slot_size * i);
+ }
+ }
for (i = 0; i < nr_vcpus; i++) {
uint64_t gpa = BASE_DATA_GPA + i * per_cpu_size;
@@ -412,13 +509,23 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t
*/
virt_map(vm, gpa, gpa, PER_CPU_DATA_SIZE / vm->page_size);
- pthread_create(&threads[i], NULL, __test_mem_conversions, vcpus[i]);
+ thread_args[i] = malloc(sizeof(struct test_thread_args));
+ TEST_ASSERT(thread_args[i] != NULL,
+ "Could not allocate memory for thread parameters");
+ thread_args[i]->vcpu = vcpus[i];
+ thread_args[i]->back_shared_memory_with_guest_memfd =
+ back_shared_memory_with_guest_memfd;
+
+ pthread_create(&threads[i], NULL, __test_mem_conversions,
+ (void *)thread_args[i]);
}
WRITE_ONCE(run_vcpus, true);
- for (i = 0; i < nr_vcpus; i++)
+ for (i = 0; i < nr_vcpus; i++) {
pthread_join(threads[i], NULL);
+ free(thread_args[i]);
+ }
kvm_vm_free(vm);
@@ -440,7 +547,7 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t
static void usage(const char *cmd)
{
puts("");
- printf("usage: %s [-h] [-m nr_memslots] [-s mem_type] [-n nr_vcpus]\n", cmd);
+ printf("usage: %s [-h] [-g] [-m nr_memslots] [-s mem_type] [-n nr_vcpus]\n", cmd);
puts("");
backing_src_help("-s");
puts("");
@@ -448,18 +555,21 @@ static void usage(const char *cmd)
puts("");
puts(" -m: specify the number of memslots (default: 1)");
puts("");
+ puts(" -g: back shared memory with guest_memfd (default: false)");
+ puts("");
}
int main(int argc, char *argv[])
{
enum vm_mem_backing_src_type src_type = DEFAULT_VM_MEM_SRC;
+ bool back_shared_memory_with_guest_memfd = false;
uint32_t nr_memslots = 1;
uint32_t nr_vcpus = 1;
int opt;
TEST_REQUIRE(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM));
- while ((opt = getopt(argc, argv, "hm:s:n:")) != -1) {
+ while ((opt = getopt(argc, argv, "hgm:s:n:")) != -1) {
switch (opt) {
case 's':
src_type = parse_backing_src_type(optarg);
@@ -470,6 +580,9 @@ int main(int argc, char *argv[])
case 'm':
nr_memslots = atoi_positive("nr_memslots", optarg);
break;
+ case 'g':
+ back_shared_memory_with_guest_memfd = true;
+ break;
case 'h':
default:
usage(argv[0]);
@@ -477,7 +590,9 @@ int main(int argc, char *argv[])
}
}
- test_mem_conversions(src_type, nr_vcpus, nr_memslots);
+ test_mem_conversions(src_type, nr_vcpus, nr_memslots,
+ back_shared_memory_with_guest_memfd);
+
return 0;
}
--
2.49.0.1045.g170613ef41-goog
Powered by blists - more mailing lists