Message-ID: <173928636641.10177.8036083101201125379.tip-bot2@tip-bot2>
Date: Tue, 11 Feb 2025 15:06:06 -0000
From: "tip-bot2 for Kan Liang" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Kan Liang <kan.liang@...ux.intel.com>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>, stable@...r.kernel.org,
x86@...nel.org, linux-kernel@...r.kernel.org
Subject: [tip: perf/urgent] perf/x86/intel: Fix ARCH_PERFMON_NUM_COUNTER_LEAF

The following commit has been merged into the perf/urgent branch of tip:

Commit-ID:     47a973fd75639fe80d59f9e1860113bb2a0b112b
Gitweb:        https://git.kernel.org/tip/47a973fd75639fe80d59f9e1860113bb2a0b112b
Author:        Kan Liang <kan.liang@...ux.intel.com>
AuthorDate:    Wed, 29 Jan 2025 07:48:19 -08:00
Committer:     Peter Zijlstra <peterz@...radead.org>
CommitterDate: Sat, 08 Feb 2025 15:47:25 +01:00

perf/x86/intel: Fix ARCH_PERFMON_NUM_COUNTER_LEAF
The EAX of the CPUID Leaf 023H enumerates the mask of valid sub-leaves.
To determine whether sub-leaf 1 (which enumerates the counter mask) is
available, perf should check bit 1 (0x2) of EAX, rather than bit 0 (0x1).

The error is not user-visible on bare metal, because sub-leaf 0 and
sub-leaf 1 are always available there. However, it may cause issues in a
virtualization environment when a VMM enumerates only sub-leaf 0.

Introduce the cpuid35_eax and cpuid35_ebx unions to replace the macros,
which makes the implementation style consistent.
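
As a quick illustration of the corrected check, here is a minimal
userspace sketch (not part of the patch; it assumes an x86 toolchain
providing GCC/clang's <cpuid.h>) that reads leaf 023H and tests bit 1
of EAX:

#include <stdio.h>
#include <cpuid.h>

#define ARCH_PERFMON_EXT_LEAF	0x00000023

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid_count() returns 0 if leaf 0x23 is not enumerated. */
	if (!__get_cpuid_count(ARCH_PERFMON_EXT_LEAF, 0,
			       &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x23 not enumerated");
		return 0;
	}

	/* Sub-leaf 1 is flagged by bit 1 (0x2) of EAX, not bit 0 (0x1). */
	printf("counters sub-leaf (1): %savailable\n",
	       (eax & 0x2) ? "" : "not ");
	return 0;
}
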
Fixes: eb467aaac21e ("perf/x86/intel: Support Architectural PerfMon Extension leaf")
Signed-off-by: Kan Liang <kan.liang@...ux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: stable@...r.kernel.org
Link: https://lkml.kernel.org/r/20250129154820.3755948-3-kan.liang@linux.intel.com
---
 arch/x86/events/intel/core.c      | 18 ++++++++++--------
 arch/x86/include/asm/perf_event.h | 28 +++++++++++++++++++++++++---
 2 files changed, 35 insertions(+), 11 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 966f783..f3d5b71 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4905,20 +4905,22 @@ static inline bool intel_pmu_broken_perf_cap(void)
 
 static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
 {
-	unsigned int sub_bitmaps, eax, ebx, ecx, edx;
+	unsigned int cntr, fixed_cntr, ecx, edx;
+	union cpuid35_eax eax;
+	union cpuid35_ebx ebx;
 
-	cpuid(ARCH_PERFMON_EXT_LEAF, &sub_bitmaps, &ebx, &ecx, &edx);
+	cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx);
 
-	if (ebx & ARCH_PERFMON_EXT_UMASK2)
+	if (ebx.split.umask2)
 		pmu->config_mask |= ARCH_PERFMON_EVENTSEL_UMASK2;
-	if (ebx & ARCH_PERFMON_EXT_EQ)
+	if (ebx.split.eq)
 		pmu->config_mask |= ARCH_PERFMON_EVENTSEL_EQ;
 
-	if (sub_bitmaps & ARCH_PERFMON_NUM_COUNTER_LEAF_BIT) {
+	if (eax.split.cntr_subleaf) {
 		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
-			    &eax, &ebx, &ecx, &edx);
-		pmu->cntr_mask64 = eax;
-		pmu->fixed_cntr_mask64 = ebx;
+			    &cntr, &fixed_cntr, &ecx, &edx);
+		pmu->cntr_mask64 = cntr;
+		pmu->fixed_cntr_mask64 = fixed_cntr;
 	}
 
 	if (!intel_pmu_broken_perf_cap()) {
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 1ac79f3..0ba8d20 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -188,11 +188,33 @@ union cpuid10_edx {
  * detection/enumeration details:
  */
 #define ARCH_PERFMON_EXT_LEAF			0x00000023
-#define ARCH_PERFMON_EXT_UMASK2			0x1
-#define ARCH_PERFMON_EXT_EQ			0x2
-#define ARCH_PERFMON_NUM_COUNTER_LEAF_BIT	0x1
 #define ARCH_PERFMON_NUM_COUNTER_LEAF		0x1
 
+union cpuid35_eax {
+	struct {
+		unsigned int	leaf0:1;
+		/* Counters Sub-Leaf */
+		unsigned int	cntr_subleaf:1;
+		/* Auto Counter Reload Sub-Leaf */
+		unsigned int	acr_subleaf:1;
+		/* Events Sub-Leaf */
+		unsigned int	events_subleaf:1;
+		unsigned int	reserved:28;
+	} split;
+	unsigned int	full;
+};
+
+union cpuid35_ebx {
+	struct {
+		/* UnitMask2 Supported */
+		unsigned int	umask2:1;
+		/* EQ-bit Supported */
+		unsigned int	eq:1;
+		unsigned int	reserved:30;
+	} split;
+	unsigned int	full;
+};
+
 /*
  * Intel Architectural LBR CPUID detection/enumeration details:
  */
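
To make the failure mode concrete, here is a small standalone sketch
(hypothetical EAX value, mirroring the union added above) contrasting
the old bit-0 test with the fixed bit-1 test under a VMM that
enumerates only sub-leaf 0:

#include <stdio.h>

union cpuid35_eax {
	struct {
		unsigned int	leaf0:1;
		unsigned int	cntr_subleaf:1;
		unsigned int	acr_subleaf:1;
		unsigned int	events_subleaf:1;
		unsigned int	reserved:28;
	} split;
	unsigned int	full;
};

int main(void)
{
	union cpuid35_eax eax;

	eax.full = 0x1;	/* VMM reports only sub-leaf 0 */

	/* Old check: bit 0 -- wrongly claims sub-leaf 1 is present. */
	printf("old (eax & 0x1): %d\n", !!(eax.full & 0x1));
	/* Fixed check: bit 1 via the bitfield -- correctly reports 0. */
	printf("new (cntr_subleaf): %d\n", eax.split.cntr_subleaf);
	return 0;
}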