Message-Id: <20251203065500.2597594-4-dapeng1.mi@linux.intel.com>
Date: Wed, 3 Dec 2025 14:54:44 +0800
From: Dapeng Mi <dapeng1.mi@...ux.intel.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Ian Rogers <irogers@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Jiri Olsa <jolsa@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Andi Kleen <ak@...ux.intel.com>,
Eranian Stephane <eranian@...gle.com>
Cc: Mark Rutland <mark.rutland@....com>,
broonie@...nel.org,
Ravi Bangoria <ravi.bangoria@....com>,
linux-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org,
Zide Chen <zide.chen@...el.com>,
Falcon Thomas <thomas.falcon@...el.com>,
Dapeng Mi <dapeng1.mi@...el.com>,
Xudong Hao <xudong.hao@...el.com>,
Kan Liang <kan.liang@...ux.intel.com>,
Dapeng Mi <dapeng1.mi@...ux.intel.com>
Subject: [Patch v5 03/19] perf/x86: Introduce x86-specific x86_pmu_setup_regs_data()
From: Kan Liang <kan.liang@...ux.intel.com>
The current perf/x86 implementation uses the generic functions
perf_sample_regs_user() and perf_sample_regs_intr() to set up register
data for sampling records. While this approach works for general
registers, it falls short when adding sampling support for the SIMD
registers and APX eGPRs on x86 platforms.

To address this, introduce the x86-specific function
x86_pmu_setup_regs_data() to set up register data for sampling records.
At present, x86_pmu_setup_regs_data() mirrors the logic of the generic
functions perf_sample_regs_user() and perf_sample_regs_intr().
Subsequent patches will introduce x86-specific enhancements.
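As an aside, purely for illustration and not part of this patch: a
minimal, hypothetical call site, modelled on the existing
x86_pmu_handle_irq() overflow path, would look roughly like this
(example_emit_sample() is a made-up name; the other interfaces already
exist in the tree):

	static void example_emit_sample(struct perf_event *event,
					struct pt_regs *regs)
	{
		struct perf_sample_data data;

		/* Initialize the sample record with the event's last period */
		perf_sample_data_init(&data, 0, event->hw.last_period);

		/*
		 * Fill in the REGS_USER/REGS_INTR register groups and grow
		 * data->dyn_size to account for the sampled registers.
		 */
		x86_pmu_setup_regs_data(event, &data, regs);

		/* Emit the sample; stop the event if the handler asks for it */
		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}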
Signed-off-by: Kan Liang <kan.liang@...ux.intel.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@...ux.intel.com>
---
arch/x86/events/core.c | 32 ++++++++++++++++++++++++++++++++
arch/x86/events/intel/ds.c | 9 ++++++---
arch/x86/events/perf_event.h | 4 ++++
3 files changed, 42 insertions(+), 3 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index ef3bf8fbc97f..dcdd2c2d68ee 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1695,6 +1695,38 @@ static void x86_pmu_del(struct perf_event *event, int flags)
static_call_cond(x86_pmu_del)(event);
}
+void x86_pmu_setup_regs_data(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ u64 sample_type = event->attr.sample_type;
+
+ if (sample_type & PERF_SAMPLE_REGS_USER) {
+ if (user_mode(regs)) {
+ data->regs_user.abi = perf_reg_abi(current);
+ data->regs_user.regs = regs;
+ } else if (!(current->flags & PF_KTHREAD)) {
+ perf_get_regs_user(&data->regs_user, regs);
+ } else {
+ data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
+ data->regs_user.regs = NULL;
+ }
+ data->dyn_size += sizeof(u64);
+ if (data->regs_user.regs)
+ data->dyn_size += hweight64(event->attr.sample_regs_user) * sizeof(u64);
+ data->sample_flags |= PERF_SAMPLE_REGS_USER;
+ }
+
+ if (sample_type & PERF_SAMPLE_REGS_INTR) {
+ data->regs_intr.regs = regs;
+ data->regs_intr.abi = perf_reg_abi(current);
+ data->dyn_size += sizeof(u64);
+ if (data->regs_intr.regs)
+ data->dyn_size += hweight64(event->attr.sample_regs_intr) * sizeof(u64);
+ data->sample_flags |= PERF_SAMPLE_REGS_INTR;
+ }
+}
+
int x86_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 2e170f2093ac..c7351f476d8c 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -2180,6 +2180,7 @@ static inline void __setup_pebs_basic_group(struct perf_event *event,
}
static inline void __setup_pebs_gpr_group(struct perf_event *event,
+ struct perf_sample_data *data,
struct pt_regs *regs,
struct pebs_gprs *gprs,
u64 sample_type)
@@ -2189,8 +2190,10 @@ static inline void __setup_pebs_gpr_group(struct perf_event *event,
regs->flags &= ~PERF_EFLAGS_EXACT;
}
- if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER))
+ if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)) {
adaptive_pebs_save_regs(regs, gprs);
+ x86_pmu_setup_regs_data(event, data, regs);
+ }
}
static inline void __setup_pebs_meminfo_group(struct perf_event *event,
@@ -2283,7 +2286,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
gprs = next_record;
next_record = gprs + 1;
- __setup_pebs_gpr_group(event, regs, gprs, sample_type);
+ __setup_pebs_gpr_group(event, data, regs, gprs, sample_type);
}
if (format_group & PEBS_DATACFG_MEMINFO) {
@@ -2407,7 +2410,7 @@ static void setup_arch_pebs_sample_data(struct perf_event *event,
gprs = next_record;
next_record = gprs + 1;
- __setup_pebs_gpr_group(event, regs,
+ __setup_pebs_gpr_group(event, data, regs,
(struct pebs_gprs *)gprs,
sample_type);
}
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 3161ec0a3416..80e52e937638 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1294,6 +1294,10 @@ void x86_pmu_enable_event(struct perf_event *event);
int x86_pmu_handle_irq(struct pt_regs *regs);
+void x86_pmu_setup_regs_data(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
+
void x86_pmu_show_pmu_cap(struct pmu *pmu);
static inline int x86_pmu_num_counters(struct pmu *pmu)
--
2.34.1