Message-Id: <20250626195610.405379-14-kan.liang@linux.intel.com>
Date: Thu, 26 Jun 2025 12:56:10 -0700
From: kan.liang@...ux.intel.com
To: peterz@...radead.org,
mingo@...hat.com,
acme@...nel.org,
namhyung@...nel.org,
tglx@...utronix.de,
dave.hansen@...ux.intel.com,
irogers@...gle.com,
adrian.hunter@...el.com,
jolsa@...nel.org,
alexander.shishkin@...ux.intel.com,
linux-kernel@...r.kernel.org
Cc: dapeng1.mi@...ux.intel.com,
ak@...ux.intel.com,
zide.chen@...el.com,
mark.rutland@....com,
broonie@...nel.org,
ravi.bangoria@....com,
Kan Liang <kan.liang@...ux.intel.com>
Subject: [RFC PATCH V2 13/13] perf/x86/intel: Enable PERF_PMU_CAP_SIMD_REGS
From: Kan Liang <kan.liang@...ux.intel.com>

Enable PERF_PMU_CAP_SIMD_REGS if there is XSAVES support for YMM, ZMM,
OPMASK, eGPRs, or SSP.

Disable large PEBS when any of these registers is requested, since the
PEBS hardware doesn't support sampling them yet.
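
For illustration, a minimal user-space sketch of how an event could
request SIMD registers via the attr fields added earlier in this
series (the XMM bitmap value and the qwords constant below are
assumptions for the example, not part of this patch):

	struct perf_event_attr attr = {};
	int fd;

	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER;
	attr.sample_simd_regs_enabled = 1;	/* opt in to SIMD regs */
	attr.sample_simd_vec_reg_user = 0xffff;	/* XMM0-XMM15 (assumed bitmap) */
	attr.sample_simd_vec_reg_qwords = 2;	/* XMM width, assuming
						 * PERF_X86_XMM_QWORDS == 2 */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

With these values large PEBS stays enabled; requesting a wider vector
width, predicate registers, eGPRs, or SSP makes
intel_pmu_large_pebs_flags() below clear the REGS sample bits, so such
events fall back to single-record PEBS.
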
Signed-off-by: Kan Liang <kan.liang@...ux.intel.com>
---
arch/x86/events/intel/core.c | 46 ++++++++++++++++++++++++++++++++++--
1 file changed, 44 insertions(+), 2 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index c73c2e57d71b..8dc638f9efd2 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4034,8 +4034,30 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
 		flags &= ~PERF_SAMPLE_TIME;
 	if (!event->attr.exclude_kernel)
 		flags &= ~PERF_SAMPLE_REGS_USER;
-	if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
-		flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
+	if (event->attr.sample_simd_regs_enabled) {
+		u64 nolarge = PERF_X86_EGPRS_MASK | BIT_ULL(PERF_REG_X86_SSP);
+
+		/*
+		 * PEBS HW can only collect the XMM0-XMM15 for now.
+		 * Disable large PEBS for other vector registers, predicate
+		 * registers, eGPRs, and SSP.
+		 */
+		if (event->attr.sample_regs_user & nolarge ||
+		    fls64(event->attr.sample_simd_vec_reg_user) > PERF_X86_H16ZMM_BASE ||
+		    event->attr.sample_simd_pred_reg_user)
+			flags &= ~PERF_SAMPLE_REGS_USER;
+
+		if (event->attr.sample_regs_intr & nolarge ||
+		    fls64(event->attr.sample_simd_vec_reg_intr) > PERF_X86_H16ZMM_BASE ||
+		    event->attr.sample_simd_pred_reg_intr)
+			flags &= ~PERF_SAMPLE_REGS_INTR;
+
+		if (event->attr.sample_simd_vec_reg_qwords > PERF_X86_XMM_QWORDS)
+			flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
+	} else {
+		if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
+			flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
+	}
 	return flags;
 }
 
@@ -5296,6 +5318,26 @@ static void intel_extended_regs_init(struct pmu *pmu)
 	x86_pmu.ext_regs_mask |= X86_EXT_REGS_XMM;
 	x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
+
+	if (boot_cpu_has(X86_FEATURE_AVX) &&
+	    cpu_has_xfeatures(XFEATURE_MASK_YMM, NULL))
+		x86_pmu.ext_regs_mask |= X86_EXT_REGS_YMM;
+	if (boot_cpu_has(X86_FEATURE_APX) &&
+	    cpu_has_xfeatures(XFEATURE_MASK_APX, NULL))
+		x86_pmu.ext_regs_mask |= X86_EXT_REGS_EGPRS;
+	if (boot_cpu_has(X86_FEATURE_AVX512F)) {
+		if (cpu_has_xfeatures(XFEATURE_MASK_OPMASK, NULL))
+			x86_pmu.ext_regs_mask |= X86_EXT_REGS_OPMASK;
+		if (cpu_has_xfeatures(XFEATURE_MASK_ZMM_Hi256, NULL))
+			x86_pmu.ext_regs_mask |= X86_EXT_REGS_ZMMH;
+		if (cpu_has_xfeatures(XFEATURE_MASK_Hi16_ZMM, NULL))
+			x86_pmu.ext_regs_mask |= X86_EXT_REGS_H16ZMM;
+	}
+	if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+		x86_pmu.ext_regs_mask |= X86_EXT_REGS_CET;
+
+	if (x86_pmu.ext_regs_mask != X86_EXT_REGS_XMM)
+		x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_SIMD_REGS;
 }
 
 static void update_pmu_cap(struct pmu *pmu)
--
2.38.1