Message-ID: <ZgOwVvTVlvk3iN3x@google.com>
Date: Wed, 27 Mar 2024 05:36:22 +0000
From: Mingwei Zhang <mizhang@...gle.com>
To: Dapeng Mi <dapeng1.mi@...ux.intel.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Jim Mattson <jmattson@...gle.com>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, Zhenyu Wang <zhenyuw@...ux.intel.com>,
Zhang Xiong <xiong.y.zhang@...el.com>,
Like Xu <like.xu.linux@...il.com>,
Jinrong Liang <cloudliang@...cent.com>,
Dapeng Mi <dapeng1.mi@...el.com>
Subject: Re: [kvm-unit-tests Patch v3 04/11] x86: pmu: Switch instructions
and core cycles events sequence
On Wed, Jan 03, 2024, Dapeng Mi wrote:
> When running pmu test on SPR, sometimes the following failure is
> reported.
>
> PMU version: 2
> GP counters: 8
> GP counter width: 48
> Mask length: 8
> Fixed counters: 3
> Fixed counter width: 48
> 1000000 <= 55109398 <= 50000000
> FAIL: Intel: core cycles-0
> 1000000 <= 18279571 <= 50000000
> PASS: Intel: core cycles-1
> 1000000 <= 12238092 <= 50000000
> PASS: Intel: core cycles-2
> 1000000 <= 7981727 <= 50000000
> PASS: Intel: core cycles-3
> 1000000 <= 6984711 <= 50000000
> PASS: Intel: core cycles-4
> 1000000 <= 6773673 <= 50000000
> PASS: Intel: core cycles-5
> 1000000 <= 6697842 <= 50000000
> PASS: Intel: core cycles-6
> 1000000 <= 6747947 <= 50000000
> PASS: Intel: core cycles-7
>
> The "core cycles" count on the first counter exceeds the upper
> boundary and leads to a failure, and then the "core cycles" count
> drops gradually and reaches a stable state on the later counters.
>
> That looks reasonable. The "core cycles" event is defined as the 1st
> event in the xxx_gp_events[] arrays and is always verified first.
> When the loop() program is executed for the first time, it needs to
> warm up the pipeline and caches, e.g. it has to wait for the caches
> to be filled. All this warm-up work leads to a rather large core
> cycles count which may exceed the verification range.
>
> The event "instructions" instead of "core cycles" is a good choice as
> the warm-up event since it would always return a fixed count. Thus
> switch instructions and core cycles events sequence in the
> xxx_gp_events[] array.
The observation is great. However, it is hard to agree that switching
the order is the right way to fix the problem. Maybe directly bumping
the upper bound for "core cycles" from 50*N to a larger value makes
more sense.
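Something along these lines (untested, and the 100*N upper bound below
is only an illustration; the real value would need to be tuned against
the worst-case warm-up count observed on SPR):

  -	{"core cycles", 0x003c, 1*N, 50*N},
  +	{"core cycles", 0x003c, 1*N, 100*N},

and similarly for amd_gp_events[]. That would keep the event order,
and with it the gp_events[N] indices used elsewhere in pmu.c,
unchanged.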
Thanks.
-Mingwei
>
> Signed-off-by: Dapeng Mi <dapeng1.mi@...ux.intel.com>
> ---
> x86/pmu.c | 16 ++++++++--------
> 1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/x86/pmu.c b/x86/pmu.c
> index a42fff8d8b36..67ebfbe55b49 100644
> --- a/x86/pmu.c
> +++ b/x86/pmu.c
> @@ -31,16 +31,16 @@ struct pmu_event {
> int min;
> int max;
> } intel_gp_events[] = {
> - {"core cycles", 0x003c, 1*N, 50*N},
> {"instructions", 0x00c0, 10*N, 10.2*N},
> + {"core cycles", 0x003c, 1*N, 50*N},
> {"ref cycles", 0x013c, 1*N, 30*N},
> {"llc references", 0x4f2e, 1, 2*N},
> {"llc misses", 0x412e, 1, 1*N},
> {"branches", 0x00c4, 1*N, 1.1*N},
> {"branch misses", 0x00c5, 0, 0.1*N},
> }, amd_gp_events[] = {
> - {"core cycles", 0x0076, 1*N, 50*N},
> {"instructions", 0x00c0, 10*N, 10.2*N},
> + {"core cycles", 0x0076, 1*N, 50*N},
> {"branches", 0x00c2, 1*N, 1.1*N},
> {"branch misses", 0x00c3, 0, 0.1*N},
> }, fixed_events[] = {
> @@ -307,7 +307,7 @@ static void check_counter_overflow(void)
> int i;
> pmu_counter_t cnt = {
> .ctr = MSR_GP_COUNTERx(0),
> - .config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
> + .config = EVNTSEL_OS | EVNTSEL_USR | gp_events[0].unit_sel /* instructions */,
> };
> overflow_preset = measure_for_overflow(&cnt);
>
> @@ -365,11 +365,11 @@ static void check_gp_counter_cmask(void)
> {
> pmu_counter_t cnt = {
> .ctr = MSR_GP_COUNTERx(0),
> - .config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
> + .config = EVNTSEL_OS | EVNTSEL_USR | gp_events[0].unit_sel /* instructions */,
> };
> cnt.config |= (0x2 << EVNTSEL_CMASK_SHIFT);
> measure_one(&cnt);
> - report(cnt.count < gp_events[1].min, "cmask");
> + report(cnt.count < gp_events[0].min, "cmask");
> }
>
> static void do_rdpmc_fast(void *ptr)
> @@ -446,7 +446,7 @@ static void check_running_counter_wrmsr(void)
> uint64_t count;
> pmu_counter_t evt = {
> .ctr = MSR_GP_COUNTERx(0),
> - .config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel,
> + .config = EVNTSEL_OS | EVNTSEL_USR | gp_events[0].unit_sel,
> };
>
> report_prefix_push("running counter wrmsr");
> @@ -455,7 +455,7 @@ static void check_running_counter_wrmsr(void)
> loop();
> wrmsr(MSR_GP_COUNTERx(0), 0);
> stop_event(&evt);
> - report(evt.count < gp_events[1].min, "cntr");
> + report(evt.count < gp_events[0].min, "cntr");
>
> /* clear status before overflow test */
> if (this_cpu_has_perf_global_status())
> @@ -493,7 +493,7 @@ static void check_emulated_instr(void)
> pmu_counter_t instr_cnt = {
> .ctr = MSR_GP_COUNTERx(1),
> /* instructions */
> - .config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel,
> + .config = EVNTSEL_OS | EVNTSEL_USR | gp_events[0].unit_sel,
> };
> report_prefix_push("emulated instruction");
>
> --
> 2.34.1
>