[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20250925061213.178796-13-dapeng1.mi@linux.intel.com>
Date: Thu, 25 Sep 2025 14:12:08 +0800
From: Dapeng Mi <dapeng1.mi@...ux.intel.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Ian Rogers <irogers@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Jiri Olsa <jolsa@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Kan Liang <kan.liang@...ux.intel.com>,
Andi Kleen <ak@...ux.intel.com>,
Eranian Stephane <eranian@...gle.com>
Cc: Mark Rutland <mark.rutland@....com>,
broonie@...nel.org,
Ravi Bangoria <ravi.bangoria@....com>,
linux-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org,
Dapeng Mi <dapeng1.mi@...el.com>,
Dapeng Mi <dapeng1.mi@...ux.intel.com>
Subject: [Patch v4 12/17] perf/x86: Add SSP into sample_regs
From: Kan Liang <kan.liang@...ux.intel.com>
The SSP is only supported when the new SIMD registers configuration
method is used, which moves the XMM registers to sample_simd_vec_regs.
The space freed in sample_regs can therefore be reclaimed for the SSP.
The SSP value is retrieved via XSAVE. The SSP is only supported on X86_64.
Signed-off-by: Kan Liang <kan.liang@...ux.intel.com>
Co-developed-by: Dapeng Mi <dapeng1.mi@...ux.intel.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@...ux.intel.com>
---
arch/x86/events/core.c | 11 ++++++++++-
arch/x86/events/perf_event.h | 10 ++++++++++
arch/x86/include/asm/perf_event.h | 4 ++++
arch/x86/include/uapi/asm/perf_regs.h | 7 +++++--
arch/x86/kernel/perf_regs.c | 8 +++++++-
5 files changed, 36 insertions(+), 4 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index a435610f4d4a..7c29c9029379 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -434,6 +434,8 @@ static void x86_pmu_get_ext_regs(struct x86_perf_regs *perf_regs, u64 mask)
perf_regs->opmask = get_xsave_addr(xsave, XFEATURE_OPMASK);
if (valid_mask & XFEATURE_MASK_APX)
perf_regs->egpr = get_xsave_addr(xsave, XFEATURE_APX);
+ if (valid_mask & XFEATURE_MASK_CET_USER)
+ perf_regs->cet = get_xsave_addr(xsave, XFEATURE_CET_USER);
}
static void release_ext_regs_buffers(void)
@@ -712,7 +714,7 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)) {
if (event_has_simd_regs(event)) {
- u64 reserved = ~GENMASK_ULL(PERF_REG_X86_64_MAX - 1, 0);
+ u64 reserved = ~GENMASK_ULL(PERF_REG_MISC_MAX - 1, 0);
if (!(event->pmu->capabilities & PERF_PMU_CAP_SIMD_REGS))
return -EINVAL;
@@ -726,6 +728,10 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event_needs_egprs(event) &&
!(x86_pmu.ext_regs_mask & XFEATURE_MASK_APX))
return -EINVAL;
+ if (event_needs_ssp(event) &&
+ !(x86_pmu.ext_regs_mask & XFEATURE_MASK_CET_USER))
+ return -EINVAL;
+
/* Not require any vector registers but set width */
if (event->attr.sample_simd_vec_reg_qwords &&
!event->attr.sample_simd_vec_reg_intr &&
@@ -1923,6 +1929,9 @@ static void x86_pmu_setup_extended_regs_data(struct perf_event *event,
perf_regs->egpr_regs = NULL;
if (event_needs_egprs(event))
mask |= XFEATURE_MASK_APX;
+ perf_regs->cet_regs = NULL;
+ if (event_needs_ssp(event))
+ mask |= XFEATURE_MASK_CET_USER;
mask &= ~ignore_mask;
if (mask)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 3dd0e669ddd4..6ff4aa23833f 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -192,6 +192,16 @@ static inline bool event_needs_egprs(struct perf_event *event)
return false;
}
+static inline bool event_needs_ssp(struct perf_event *event)
+{
+ if (event->attr.sample_simd_regs_enabled &&
+ (event->attr.sample_regs_user & BIT_ULL(PERF_REG_X86_SSP) ||
+ event->attr.sample_regs_intr & BIT_ULL(PERF_REG_X86_SSP)))
+ return true;
+
+ return false;
+}
+
struct amd_nb {
int nb_id; /* NorthBridge id */
int refcnt; /* reference count */
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 73c2064c13f9..9d10299355c5 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -621,6 +621,10 @@ struct x86_perf_regs {
u64 *egpr_regs;
struct apx_state *egpr;
};
+ union {
+ u64 *cet_regs;
+ struct cet_user_state *cet;
+ };
};
extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/asm/perf_regs.h
index 38644de89815..0cf0490c47b2 100644
--- a/arch/x86/include/uapi/asm/perf_regs.h
+++ b/arch/x86/include/uapi/asm/perf_regs.h
@@ -28,9 +28,9 @@ enum perf_event_x86_regs {
PERF_REG_X86_R14,
PERF_REG_X86_R15,
/*
- * The EGPRs and XMM have overlaps. Only one can be used
+ * The EGPRs/SSP and XMM have overlaps. Only one can be used
* at a time. For the ABI type PERF_SAMPLE_REGS_ABI_SIMD,
- * utilize EGPRs. For the other ABI type, XMM is used.
+ * utilize EGPRs/SSP. For the other ABI type, XMM is used.
*
* Extended GPRs (EGPRs)
*/
@@ -54,6 +54,9 @@ enum perf_event_x86_regs {
PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1,
PERF_REG_X86_64_MAX = PERF_REG_X86_R31 + 1,
+ PERF_REG_X86_SSP,
+ PERF_REG_MISC_MAX = PERF_REG_X86_SSP + 1,
+
/* These all need two bits set because they are 128bit */
PERF_REG_X86_XMM0 = 32,
PERF_REG_X86_XMM1 = 34,
diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c
index b98b47a79d02..4d519867a3ef 100644
--- a/arch/x86/kernel/perf_regs.c
+++ b/arch/x86/kernel/perf_regs.c
@@ -101,6 +101,11 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
return 0;
return perf_regs->egpr_regs[idx - PERF_REG_X86_R16];
}
+ if (idx == PERF_REG_X86_SSP) {
+ if (!perf_regs->cet_regs)
+ return 0;
+ return perf_regs->cet_regs[1];
+ }
} else {
if (idx >= PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) {
if (!perf_regs->xmm_regs)
@@ -191,7 +196,8 @@ int perf_simd_reg_validate(u16 vec_qwords, u64 vec_mask,
~((1ULL << PERF_REG_X86_MAX) - 1))
#ifdef CONFIG_X86_32
-#define REG_NOSUPPORT GENMASK_ULL(PERF_REG_X86_R31, PERF_REG_X86_R8)
+#define REG_NOSUPPORT (GENMASK_ULL(PERF_REG_X86_R31, PERF_REG_X86_R8) | \
+ BIT_ULL(PERF_REG_X86_SSP))
int perf_reg_validate(u64 mask)
{
--
2.34.1
Powered by blists - more mailing lists