Message-ID: <aO2pROT5K+4J7j9k@yzhao56-desk.sh.intel.com>
Date: Tue, 14 Oct 2025 09:37:08 +0800
From: Yan Zhao <yan.y.zhao@...el.com>
To: Sean Christopherson <seanjc@...gle.com>
CC: Paolo Bonzini <pbonzini@...hat.com>, <kvm@...r.kernel.org>,
<linux-kernel@...r.kernel.org>
Subject: Re: [PATCH] KVM: selftests: Use "gpa" and "gva" for local variable
names in pre-fault test
On Tue, Oct 07, 2025 at 03:45:15PM -0700, Sean Christopherson wrote:
> Rename guest_test_{phys,virt}_mem to g{p,v}a in the pre-fault memory test
> to shorten line lengths and to use standard terminology.
>
> No functional change intended.
>
> Cc: Yan Zhao <yan.y.zhao@...el.com>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
> .../selftests/kvm/pre_fault_memory_test.c | 27 +++++++++----------
> 1 file changed, 12 insertions(+), 15 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/pre_fault_memory_test.c b/tools/testing/selftests/kvm/pre_fault_memory_test.c
> index f04768c1d2e4..6db75946a4f8 100644
> --- a/tools/testing/selftests/kvm/pre_fault_memory_test.c
> +++ b/tools/testing/selftests/kvm/pre_fault_memory_test.c
> @@ -161,6 +161,7 @@ static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 base_gpa, u64 offset,
>
> static void __test_pre_fault_memory(unsigned long vm_type, bool private)
> {
> + uint64_t gpa, gva, alignment, guest_page_size;
> const struct vm_shape shape = {
> .mode = VM_MODE_DEFAULT,
> .type = vm_type,
> @@ -170,35 +171,31 @@ static void __test_pre_fault_memory(unsigned long vm_type, bool private)
> struct kvm_vm *vm;
> struct ucall uc;
>
> - uint64_t guest_test_phys_mem;
> - uint64_t guest_test_virt_mem;
> - uint64_t alignment, guest_page_size;
> -
> vm = vm_create_shape_with_one_vcpu(shape, &vcpu, guest_code);
>
> alignment = guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
> - guest_test_phys_mem = (vm->max_gfn - TEST_NPAGES) * guest_page_size;
> + gpa = (vm->max_gfn - TEST_NPAGES) * guest_page_size;
> #ifdef __s390x__
> alignment = max(0x100000UL, guest_page_size);
> #else
> alignment = SZ_2M;
> #endif
> - guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
> - guest_test_virt_mem = guest_test_phys_mem & ((1ULL << (vm->va_bits - 1)) - 1);
> + gpa = align_down(gpa, alignment);
> + gva = gpa & ((1ULL << (vm->va_bits - 1)) - 1);
>
> - vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
> - guest_test_phys_mem, TEST_SLOT, TEST_NPAGES,
> + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa,
Wrap at 80 characters?
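Just to illustrate (assuming the 80-column limit is the target), TEST_SLOT
still fits on the first line, e.g.:

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, TEST_SLOT,
				    TEST_NPAGES,
				    private ? KVM_MEM_GUEST_MEMFD : 0);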
> + TEST_SLOT, TEST_NPAGES,
> private ? KVM_MEM_GUEST_MEMFD : 0);
> - virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, TEST_NPAGES);
> + virt_map(vm, gva, gpa, TEST_NPAGES);
>
> if (private)
> - vm_mem_set_private(vm, guest_test_phys_mem, TEST_SIZE);
> + vm_mem_set_private(vm, gpa, TEST_SIZE);
>
> - pre_fault_memory(vcpu, guest_test_phys_mem, 0, SZ_2M, 0, private);
> - pre_fault_memory(vcpu, guest_test_phys_mem, SZ_2M, PAGE_SIZE * 2, PAGE_SIZE, private);
> - pre_fault_memory(vcpu, guest_test_phys_mem, TEST_SIZE, PAGE_SIZE, PAGE_SIZE, private);
> + pre_fault_memory(vcpu, gpa, 0, SZ_2M, 0, private);
> + pre_fault_memory(vcpu, gpa, SZ_2M, PAGE_SIZE * 2, PAGE_SIZE, private);
> + pre_fault_memory(vcpu, gpa, TEST_SIZE, PAGE_SIZE, PAGE_SIZE, private);
>
> - vcpu_args_set(vcpu, 1, guest_test_virt_mem);
> + vcpu_args_set(vcpu, 1, gva);
Since guest_code() now receives a GVA, should we also clean it up as below?
-static void guest_code(uint64_t base_gpa)
+static void guest_code(uint64_t base_gva)
 {
 	volatile uint64_t val __used;
 	int i;

 	for (i = 0; i < TEST_NPAGES; i++) {
-		uint64_t *src = (uint64_t *)(base_gpa + i * PAGE_SIZE);
+		uint64_t *src = (uint64_t *)(base_gva + i * PAGE_SIZE);
 		val = *src;
 	}
> vcpu_run(vcpu);
>
> run = vcpu->run;
>
> base-commit: efcebc8f7aeeba15feb1a5bde70af74d96bf1a76
> --
> 2.51.0.710.ga91ca5db03-goog
>
>