Message-ID: <20240813164244.751597-7-coltonlewis@google.com>
Date: Tue, 13 Aug 2024 16:42:44 +0000
From: Colton Lewis <coltonlewis@...gle.com>
To: kvm@...r.kernel.org
Cc: Mingwei Zhang <mizhang@...gle.com>, Jinrong Liang <ljr.kernel@...il.com>,
Jim Mattson <jmattson@...gle.com>, Aaron Lewis <aaronlewis@...gle.com>,
Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>, Shuah Khan <shuah@...nel.org>,
linux-kselftest@...r.kernel.org, linux-kernel@...r.kernel.org,
Colton Lewis <coltonlewis@...gle.com>
Subject: [PATCH 6/6] KVM: x86: selftests: Test PerfMonV2
Test PerfMonV2, which defines global registers that enable or disable
multiple performance counters with a single MSR write, in its own
function.

If the feature is available, ensure the global control register can
start and stop the performance counters, and that the global status
register correctly flags an overflow by the associated counter.
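As a minimal sketch (not part of this patch; the counter index and
event choice are illustrative), the guest-side PerfMonV2 flow being
exercised looks like:

	/* Program counter 0 to count core cycles in ring 0. */
	wrmsr(MSR_F15H_PERF_CTL, ARCH_PERFMON_EVENTSEL_OS |
				 ARCH_PERFMON_EVENTSEL_ENABLE |
				 AMD_ZEN_CORE_CYCLES);
	wrmsr(MSR_F15H_PERF_CTR, 0);

	/* A single write to the global control MSR starts counting... */
	wrmsr(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, BIT_ULL(0));
	/* ...and another stops it. */
	wrmsr(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);

	/*
	 * Overflow is reported via the global status MSR and
	 * acknowledged via the corresponding clear MSR.
	 */
	if (rdmsr(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS) & BIT_ULL(0))
		wrmsr(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, BIT_ULL(0));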
Signed-off-by: Colton Lewis <coltonlewis@...gle.com>
---
.../selftests/kvm/x86_64/pmu_counters_test.c | 53 +++++++++++++++++++
1 file changed, 53 insertions(+)
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
index fae078b444b3..a6aa37ee460a 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -750,10 +750,63 @@ static void guest_test_core_events(void)
		}
	}
}

+static void guest_test_perf_mon_v2(void)
+{
+	uint64_t i;
+	uint64_t eventsel = ARCH_PERFMON_EVENTSEL_OS |
+			    ARCH_PERFMON_EVENTSEL_ENABLE |
+			    AMD_ZEN_CORE_CYCLES;
+	bool core_ext = this_cpu_has(X86_FEATURE_PERF_CTR_EXT_CORE);
+	uint64_t sel_msr_base = core_ext ? MSR_F15H_PERF_CTL : MSR_K7_EVNTSEL0;
+	uint64_t cnt_msr_base = core_ext ? MSR_F15H_PERF_CTR : MSR_K7_PERFCTR0;
+	uint64_t msr_step = core_ext ? 2 : 1;
+	uint8_t nr_counters = this_cpu_property(X86_PROPERTY_NUM_PERF_CTR_CORE);
+	bool perf_mon_v2 = this_cpu_has(X86_FEATURE_PERF_MON_V2);
+	uint64_t sel_msr;
+	uint64_t cnt_msr;
+
+	if (!perf_mon_v2)
+		return;
+
+	for (i = 0; i < nr_counters; i++) {
+		sel_msr = sel_msr_base + msr_step * i;
+		cnt_msr = cnt_msr_base + msr_step * i;
+
+		/* Ensure count stays 0 when global register disables counter. */
+		wrmsr(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
+		wrmsr(sel_msr, eventsel);
+		wrmsr(cnt_msr, 0);
+		__asm__ __volatile__("loop ." : "+c"((int){NUM_LOOPS}));
+		GUEST_ASSERT(!_rdpmc(i));
+
+		/* Ensure counter is >0 when global register enables counter. */
+		wrmsr(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, BIT_ULL(i));
+		__asm__ __volatile__("loop ." : "+c"((int){NUM_LOOPS}));
+		wrmsr(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
+		GUEST_ASSERT(_rdpmc(i));
+
+		/* Ensure global status register flags a counter overflow. */
+		wrmsr(cnt_msr, -1);
+		wrmsr(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, 0xff);
+		wrmsr(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, BIT_ULL(i));
+		__asm__ __volatile__("loop ." : "+c"((int){NUM_LOOPS}));
+		wrmsr(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
+		GUEST_ASSERT(rdmsr(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS) &
+			     BIT_ULL(i));
+
+		/* Ensure global status register flag is cleared correctly. */
+		wrmsr(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, BIT_ULL(i));
+		GUEST_ASSERT(!(rdmsr(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS) &
+			       BIT_ULL(i)));
+	}
+}
+
+
static void guest_test_core_counters(void)
{
	guest_test_rdwr_core_counters();
	guest_test_core_events();
+	guest_test_perf_mon_v2();
	GUEST_DONE();
}
--
2.46.0.76.ge559c4bf1a-goog