Message-ID: <20260113152643.18858-3-jackabt.amazon@gmail.com>
Date: Tue, 13 Jan 2026 15:26:41 +0000
From: Jack Thomson <jackabt.amazon@...il.com>
To: maz@...nel.org,
oliver.upton@...ux.dev,
pbonzini@...hat.com
Cc: joey.gouly@....com,
suzuki.poulose@....com,
yuzenghui@...wei.com,
catalin.marinas@....com,
will@...nel.org,
shuah@...nel.org,
linux-arm-kernel@...ts.infradead.org,
kvmarm@...ts.linux.dev,
linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org,
isaku.yamahata@...el.com,
xmarcalx@...zon.co.uk,
kalyazin@...zon.co.uk,
jackabt@...zon.com
Subject: [PATCH v4 2/3] KVM: selftests: Enable pre_fault_memory_test for arm64
From: Jack Thomson <jackabt@...zon.com>
Enable the pre_fault_memory_test to run on arm64 by making it work with
different guest page sizes and testing multiple guest configurations.
Update the TEST_ASSERT to compare against UCALL_EXIT_REASON for
portability, as arm64 ucalls exit with KVM_EXIT_MMIO while x86 uses
KVM_EXIT_IO.
Signed-off-by: Jack Thomson <jackabt@...zon.com>
---
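As background for the page-size changes below: the test no longer derives
sizes from the host PAGE_SIZE; instead the host computes them per guest
mode and publishes them to the guest through a global struct. A minimal
sketch of that pattern follows (not part of the patch; it reuses the
existing selftest helpers, and publish_test_config() is only an
illustrative wrapper):

#include <kvm_util.h>
#include <guest_modes.h>

struct test_config {
	uint64_t page_size;
	uint64_t test_size;
	uint64_t test_num_pages;
};

static struct test_config test_config;	/* read by guest_code() */

static void publish_test_config(struct kvm_vm *vm, enum vm_guest_mode mode)
{
	/* Derive sizes from the guest mode, not the host PAGE_SIZE. */
	test_config.page_size = vm_guest_mode_params[mode].page_size;
	test_config.test_size = SZ_2M + test_config.page_size;
	test_config.test_num_pages =
		vm_calc_num_guest_pages(mode, test_config.test_size);

	/* Copy the host-side values into the guest's view of the global. */
	sync_global_to_guest(vm, test_config);
}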
tools/testing/selftests/kvm/Makefile.kvm | 1 +
.../selftests/kvm/pre_fault_memory_test.c | 85 ++++++++++++++-----
2 files changed, 63 insertions(+), 23 deletions(-)
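One more note before the diff: the exit-reason assertion now keys off
UCALL_EXIT_REASON, which each architecture's ucall header defines as the
exit reason its ucall transport produces (KVM_EXIT_IO on x86,
KVM_EXIT_MMIO on arm64). A sketch of the portable check, factored into a
hypothetical helper purely for illustration (assumes the usual selftest
headers such as kvm_util.h):

static void assert_ucall_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/* Compare against the per-arch ucall exit reason, not KVM_EXIT_IO. */
	TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
		    "Wanted %s, got exit reason: %u (%s)",
		    exit_reason_str(UCALL_EXIT_REASON),
		    run->exit_reason, exit_reason_str(run->exit_reason));
}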
diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index ba5c2b643efa..6d6a74ddad30 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -187,6 +187,7 @@ TEST_GEN_PROGS_arm64 += memslot_perf_test
TEST_GEN_PROGS_arm64 += mmu_stress_test
TEST_GEN_PROGS_arm64 += rseq_test
TEST_GEN_PROGS_arm64 += steal_time
+TEST_GEN_PROGS_arm64 += pre_fault_memory_test
TEST_GEN_PROGS_s390 = $(TEST_GEN_PROGS_COMMON)
TEST_GEN_PROGS_s390 += s390/memop
diff --git a/tools/testing/selftests/kvm/pre_fault_memory_test.c b/tools/testing/selftests/kvm/pre_fault_memory_test.c
index 93e603d91311..be1a84a6c137 100644
--- a/tools/testing/selftests/kvm/pre_fault_memory_test.c
+++ b/tools/testing/selftests/kvm/pre_fault_memory_test.c
@@ -11,19 +11,29 @@
#include <kvm_util.h>
#include <processor.h>
#include <pthread.h>
+#include <guest_modes.h>
/* Arbitrarily chosen values */
-#define TEST_SIZE (SZ_2M + PAGE_SIZE)
-#define TEST_NPAGES (TEST_SIZE / PAGE_SIZE)
+#define TEST_BASE_SIZE SZ_2M
#define TEST_SLOT 10
-static void guest_code(uint64_t base_gva)
+/* Storage of test info to share with guest code */
+struct test_config {
+ uint64_t page_size;
+ uint64_t test_size;
+ uint64_t test_num_pages;
+};
+
+static struct test_config test_config;
+
+static void guest_code(uint64_t base_gpa)
{
volatile uint64_t val __used;
+ struct test_config *config = &test_config;
int i;
- for (i = 0; i < TEST_NPAGES; i++) {
- uint64_t *src = (uint64_t *)(base_gva + i * PAGE_SIZE);
+ for (i = 0; i < config->test_num_pages; i++) {
+ uint64_t *src = (uint64_t *)(base_gpa + i * config->page_size);
val = *src;
}
@@ -56,7 +66,7 @@ static void *delete_slot_worker(void *__data)
cpu_relax();
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, data->gpa,
- TEST_SLOT, TEST_NPAGES, data->flags);
+ TEST_SLOT, test_config.test_num_pages, data->flags);
return NULL;
}
@@ -159,22 +169,35 @@ static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 base_gpa, u64 offset,
KVM_PRE_FAULT_MEMORY, ret, vcpu->vm);
}
-static void __test_pre_fault_memory(unsigned long vm_type, bool private)
+struct test_params {
+ unsigned long vm_type;
+ bool private;
+};
+
+static void __test_pre_fault_memory(enum vm_guest_mode guest_mode, void *arg)
{
uint64_t gpa, gva, alignment, guest_page_size;
+ struct test_params *p = arg;
const struct vm_shape shape = {
- .mode = VM_MODE_DEFAULT,
- .type = vm_type,
+ .mode = guest_mode,
+ .type = p->vm_type,
};
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
struct ucall uc;
+ pr_info("Testing guest mode: %s\n", vm_guest_mode_string(guest_mode));
+
vm = vm_create_shape_with_one_vcpu(shape, &vcpu, guest_code);
- alignment = guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
- gpa = (vm->max_gfn - TEST_NPAGES) * guest_page_size;
+ guest_page_size = vm_guest_mode_params[guest_mode].page_size;
+
+ test_config.page_size = guest_page_size;
+ test_config.test_size = TEST_BASE_SIZE + test_config.page_size;
+ test_config.test_num_pages = vm_calc_num_guest_pages(vm->mode, test_config.test_size);
+
+ gpa = (vm->max_gfn - test_config.test_num_pages) * test_config.page_size;
#ifdef __s390x__
alignment = max(0x100000UL, guest_page_size);
#else
@@ -183,23 +206,32 @@ static void __test_pre_fault_memory(unsigned long vm_type, bool private)
gpa = align_down(gpa, alignment);
gva = gpa & ((1ULL << (vm->va_bits - 1)) - 1);
- vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, TEST_SLOT,
- TEST_NPAGES, private ? KVM_MEM_GUEST_MEMFD : 0);
- virt_map(vm, gva, gpa, TEST_NPAGES);
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+ gpa, TEST_SLOT, test_config.test_num_pages,
+ p->private ? KVM_MEM_GUEST_MEMFD : 0);
+ virt_map(vm, gva, gpa, test_config.test_num_pages);
+
+ if (p->private)
+ vm_mem_set_private(vm, gpa, test_config.test_size);
+ pre_fault_memory(vcpu, gpa, 0, TEST_BASE_SIZE, 0, p->private);
+ /* Test pre-faulting over an already faulted range */
+ pre_fault_memory(vcpu, gpa, 0, TEST_BASE_SIZE, 0, p->private);
+ pre_fault_memory(vcpu, gpa, TEST_BASE_SIZE,
+ test_config.page_size * 2, test_config.page_size, p->private);
+ pre_fault_memory(vcpu, gpa, test_config.test_size,
+ test_config.page_size, test_config.page_size, p->private);
- if (private)
- vm_mem_set_private(vm, gpa, TEST_SIZE);
+ vcpu_args_set(vcpu, 1, gva);
- pre_fault_memory(vcpu, gpa, 0, SZ_2M, 0, private);
- pre_fault_memory(vcpu, gpa, SZ_2M, PAGE_SIZE * 2, PAGE_SIZE, private);
- pre_fault_memory(vcpu, gpa, TEST_SIZE, PAGE_SIZE, PAGE_SIZE, private);
+ /* Export the shared variables to the guest. */
+ sync_global_to_guest(vm, test_config);
- vcpu_args_set(vcpu, 1, gva);
vcpu_run(vcpu);
run = vcpu->run;
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Wanted KVM_EXIT_IO, got exit reason: %u (%s)",
+ TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
+ "Wanted %s, got exit reason: %u (%s)",
+ exit_reason_str(UCALL_EXIT_REASON),
run->exit_reason, exit_reason_str(run->exit_reason));
switch (get_ucall(vcpu, &uc)) {
@@ -218,18 +250,25 @@ static void __test_pre_fault_memory(unsigned long vm_type, bool private)
static void test_pre_fault_memory(unsigned long vm_type, bool private)
{
+ struct test_params p = {
+ .vm_type = vm_type,
+ .private = private,
+ };
+
if (vm_type && !(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(vm_type))) {
pr_info("Skipping tests for vm_type 0x%lx\n", vm_type);
return;
}
- __test_pre_fault_memory(vm_type, private);
+ for_each_guest_mode(__test_pre_fault_memory, &p);
}
int main(int argc, char *argv[])
{
TEST_REQUIRE(kvm_check_cap(KVM_CAP_PRE_FAULT_MEMORY));
+ guest_modes_append_default();
+
test_pre_fault_memory(0, false);
#ifdef __x86_64__
test_pre_fault_memory(KVM_X86_SW_PROTECTED_VM, false);
--
2.43.0