Message-ID: <20240911204158.2034295-12-seanjc@google.com>
Date: Wed, 11 Sep 2024 13:41:56 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Marc Zyngier <maz@...nel.org>, Oliver Upton <oliver.upton@...ux.dev>,
Anup Patel <anup@...infault.org>, Paolo Bonzini <pbonzini@...hat.com>,
Christian Borntraeger <borntraeger@...ux.ibm.com>, Janosch Frank <frankja@...ux.ibm.com>,
Claudio Imbrenda <imbrenda@...ux.ibm.com>
Cc: linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
kvm@...r.kernel.org, kvm-riscv@...ts.infradead.org,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
Sean Christopherson <seanjc@...gle.com>, James Houghton <jthoughton@...gle.com>
Subject: [PATCH v2 11/13] KVM: selftests: Precisely limit the number of guest
loops in mmu_stress_test

Run the exact number of guest loops required in mmu_stress_test instead
of looping indefinitely in anticipation of adding more stages that run
different code (e.g. reads instead of writes).

Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
tools/testing/selftests/kvm/mmu_stress_test.c | 25 ++++++++++++++-----
1 file changed, 19 insertions(+), 6 deletions(-)
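
Not part of the patch, just an aside for reviewers: a minimal sketch of the
host-side ucall handshake that run_vcpu()/assert_sync_stage() rely on,
assuming the standard selftests ucall API where the guest's GUEST_SYNC(stage)
surfaces as UCALL_SYNC with the stage value in uc.args[1].  The helper name
run_and_check_stage() is hypothetical, and unlike the patch's minimal
assert_sync_stage() it also reports guest asserts explicitly.

/* Assumes the usual selftests headers: kvm_util.h, ucall_common.h. */
static void run_and_check_stage(struct kvm_vcpu *vcpu, int expected_stage)
{
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		/* GUEST_SYNC(stage) carries the stage value in args[1]. */
		TEST_ASSERT_EQ(uc.args[1], expected_stage);
		break;
	case UCALL_ABORT:
		/* The guest's trailing GUEST_ASSERT(0) would land here. */
		REPORT_GUEST_ASSERT(uc);
		break;
	default:
		TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
	}
}

With the guest bounded to exactly two loops, an unexpected extra iteration
shows up as UCALL_ABORT (from the trailing GUEST_ASSERT(0)) rather than as
yet another UCALL_SYNC.
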
diff --git a/tools/testing/selftests/kvm/mmu_stress_test.c b/tools/testing/selftests/kvm/mmu_stress_test.c
index 80863e8290db..9573ed0e696d 100644
--- a/tools/testing/selftests/kvm/mmu_stress_test.c
+++ b/tools/testing/selftests/kvm/mmu_stress_test.c
@@ -19,12 +19,15 @@
 static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
 {
 	uint64_t gpa;
+	int i;
 
-	for (;;) {
+	for (i = 0; i < 2; i++) {
 		for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
 			vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
-		GUEST_SYNC(0);
+		GUEST_SYNC(i);
 	}
+
+	GUEST_ASSERT(0);
 }
 
 struct vcpu_info {
@@ -51,10 +54,18 @@ static void rendezvous_with_boss(void)
 	}
 }
 
-static void run_vcpu(struct kvm_vcpu *vcpu)
+static void assert_sync_stage(struct kvm_vcpu *vcpu, int stage)
+{
+	struct ucall uc;
+
+	TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
+	TEST_ASSERT_EQ(uc.args[1], stage);
+}
+
+static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
 {
 	vcpu_run(vcpu);
-	TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
+	assert_sync_stage(vcpu, stage);
 }
 
 static void *vcpu_worker(void *data)
@@ -68,7 +79,8 @@ static void *vcpu_worker(void *data)
 
 	rendezvous_with_boss();
 
-	run_vcpu(vcpu);
+	/* Stage 0, write all of guest memory. */
+	run_vcpu(vcpu, 0);
 	rendezvous_with_boss();
 #ifdef __x86_64__
 	vcpu_sregs_get(vcpu, &sregs);
@@ -78,7 +90,8 @@ static void *vcpu_worker(void *data)
 #endif
 	rendezvous_with_boss();
 
-	run_vcpu(vcpu);
+	/* Stage 1, re-write all of guest memory. */
+	run_vcpu(vcpu, 1);
 	rendezvous_with_boss();
 
 	return NULL;
--
2.46.0.598.g6f2099f65c-goog