Message-ID: <20230407233254.957013-2-seanjc@google.com>
Date: Fri, 7 Apr 2023 16:32:49 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Aaron Lewis <aaronlewis@...gle.com>,
Sean Christopherson <seanjc@...gle.com>
Subject: [PATCH v4 1/6] KVM: selftests: Add a common helper for the PMU event
filter guest code

From: Aaron Lewis <aaronlewis@...gle.com>

Split out the common parts of the Intel and AMD guest code in the PMU
event filter test into a helper function. This is in preparation for
adding additional counters to the test.

No functional changes intended.

Signed-off-by: Aaron Lewis <aaronlewis@...gle.com>
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
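For reference, here is the helper being factored out, with comments added
in this note only (they are not part of the patch); NUM_BRANCHES and the
rdmsr()/wrmsr() guest accessors already exist in the selftest:

  static uint64_t run_and_measure_loop(uint32_t msr_base)
  {
  	/* Snapshot the branches-retired count before running the loop. */
  	uint64_t branches_retired = rdmsr(msr_base + 0);

  	/*
  	 * "loop ." branches back to itself until ECX reaches zero, i.e.
  	 * retires one branch instruction per iteration, NUM_BRANCHES in
  	 * total, which is what the enabled PMC is expected to observe.
  	 */
  	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));

  	/* Return the number of branches counted while the loop ran. */
  	return rdmsr(msr_base + 0) - branches_retired;
  }

Both intel_guest_code() and amd_guest_code() pass their architecture's
counter MSR (MSR_IA32_PMC0 or MSR_K7_PERFCTR0) as msr_base.
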
.../kvm/x86_64/pmu_event_filter_test.c | 29 ++++++++++++-------
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 1f60dfae69e0..a00a9d6ea41e 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -100,6 +100,15 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip)
 	GUEST_SYNC(0);
 }
 
+static uint64_t run_and_measure_loop(uint32_t msr_base)
+{
+	uint64_t branches_retired = rdmsr(msr_base + 0);
+
+	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+
+	return rdmsr(msr_base + 0) - branches_retired;
+}
+
 static void intel_guest_code(void)
 {
 	check_msr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
@@ -108,16 +117,15 @@ static void intel_guest_code(void)
 	GUEST_SYNC(1);
 
 	for (;;) {
-		uint64_t br0, br1;
+		uint64_t count;
 
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 		wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
-		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
-		br0 = rdmsr(MSR_IA32_PMC0);
-		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
-		br1 = rdmsr(MSR_IA32_PMC0);
-		GUEST_SYNC(br1 - br0);
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1);
+
+		count = run_and_measure_loop(MSR_IA32_PMC0);
+		GUEST_SYNC(count);
 	}
 }
 
@@ -133,15 +141,14 @@ static void amd_guest_code(void)
 	GUEST_SYNC(1);
 
 	for (;;) {
-		uint64_t br0, br1;
+		uint64_t count;
 
 		wrmsr(MSR_K7_EVNTSEL0, 0);
 		wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
-		br0 = rdmsr(MSR_K7_PERFCTR0);
-		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
-		br1 = rdmsr(MSR_K7_PERFCTR0);
-		GUEST_SYNC(br1 - br0);
+
+		count = run_and_measure_loop(MSR_K7_PERFCTR0);
+		GUEST_SYNC(count);
 	}
 }
 
--
2.40.0.577.gac1e443424-goog