Message-ID: <20241128005547.4077116-12-seanjc@google.com>
Date: Wed, 27 Nov 2024 16:55:42 -0800
From: Sean Christopherson <seanjc@...gle.com>
To: Marc Zyngier <maz@...nel.org>, Oliver Upton <oliver.upton@...ux.dev>,
Anup Patel <anup@...infault.org>, Paul Walmsley <paul.walmsley@...ive.com>,
Palmer Dabbelt <palmer@...belt.com>, Albert Ou <aou@...s.berkeley.edu>,
Paolo Bonzini <pbonzini@...hat.com>, Christian Borntraeger <borntraeger@...ux.ibm.com>,
Janosch Frank <frankja@...ux.ibm.com>, Claudio Imbrenda <imbrenda@...ux.ibm.com>,
Sean Christopherson <seanjc@...gle.com>
Cc: linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
kvm@...r.kernel.org, kvm-riscv@...ts.infradead.org,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
Andrew Jones <ajones@...tanamicro.com>, James Houghton <jthoughton@...gle.com>,
Muhammad Usama Anjum <usama.anjum@...labora.com>
Subject: [PATCH v4 11/16] KVM: selftests: Precisely limit the number of guest
loops in mmu_stress_test

Run the exact number of guest loops required in mmu_stress_test instead of
looping indefinitely, in anticipation of adding more stages that run
different code (e.g. reads instead of writes).

Reviewed-by: James Houghton <jthoughton@...gle.com>
Reviewed-by: Andrew Jones <ajones@...tanamicro.com>
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
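A condensed sketch of the resulting guest/host handshake, for reviewers who
want the shape of the change without walking the hunks below (this is an
illustration derived from the diff, not a literal excerpt of the file; "uc"
and "stage" are the host-side locals introduced by the patch):

	/* Guest: do exactly two write passes, reporting each stage via ucall. */
	for (i = 0; i < 2; i++) {
		for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
			vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
		GUEST_SYNC(i);
	}
	/* Fires only if the host re-runs the vCPU after the final stage. */
	GUEST_ASSERT(0);

	/* Host: each vcpu_run() must exit with a GUEST_SYNC for the expected stage. */
	vcpu_run(vcpu);
	TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
	TEST_ASSERT_EQ(uc.args[1], stage);
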
tools/testing/selftests/kvm/mmu_stress_test.c | 25 ++++++++++++++-----
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/tools/testing/selftests/kvm/mmu_stress_test.c b/tools/testing/selftests/kvm/mmu_stress_test.c
index 656a837c7f49..c6bf18cb7c89 100644
--- a/tools/testing/selftests/kvm/mmu_stress_test.c
+++ b/tools/testing/selftests/kvm/mmu_stress_test.c
@@ -20,12 +20,15 @@
 static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
 {
 	uint64_t gpa;
+	int i;
 
-	for (;;) {
+	for (i = 0; i < 2; i++) {
 		for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
 			vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
-		GUEST_SYNC(0);
+		GUEST_SYNC(i);
 	}
+
+	GUEST_ASSERT(0);
 }
 
 struct vcpu_info {
@@ -52,10 +55,18 @@ static void rendezvous_with_boss(void)
 	}
 }
 
-static void run_vcpu(struct kvm_vcpu *vcpu)
+static void assert_sync_stage(struct kvm_vcpu *vcpu, int stage)
+{
+	struct ucall uc;
+
+	TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
+	TEST_ASSERT_EQ(uc.args[1], stage);
+}
+
+static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
 {
 	vcpu_run(vcpu);
-	TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
+	assert_sync_stage(vcpu, stage);
 }
 
 static void *vcpu_worker(void *data)
@@ -69,7 +80,8 @@ static void *vcpu_worker(void *data)
 
 	rendezvous_with_boss();
 
-	run_vcpu(vcpu);
+	/* Stage 0, write all of guest memory. */
+	run_vcpu(vcpu, 0);
 	rendezvous_with_boss();
 #ifdef __x86_64__
 	vcpu_sregs_get(vcpu, &sregs);
@@ -79,7 +91,8 @@ static void *vcpu_worker(void *data)
 #endif
 	rendezvous_with_boss();
 
-	run_vcpu(vcpu);
+	/* Stage 1, re-write all of guest memory. */
+	run_vcpu(vcpu, 1);
 	rendezvous_with_boss();
 
 	return NULL;
--
2.47.0.338.g60cca15819-goog