Message-ID: <20220829101321.505933457@infradead.org>
Date: Mon, 29 Aug 2022 12:10:01 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: x86@...nel.org, kan.liang@...ux.intel.com, eranian@...gle.com,
ravi.bangoria@....com
Cc: linux-kernel@...r.kernel.org, peterz@...radead.org,
acme@...nel.org, mark.rutland@....com,
alexander.shishkin@...ux.intel.com, jolsa@...nel.org,
namhyung@...nel.org
Subject: [PATCH v2 2/9] perf/x86/intel: Move the topdown stuff into the intel driver

Use the new x86_pmu::{set_period,update}() methods to push the topdown
stuff into the Intel driver, where it belongs.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 arch/x86/events/core.c       |    7 -------
 arch/x86/events/intel/core.c |   28 +++++++++++++++++++++++++---
 2 files changed, 25 insertions(+), 10 deletions(-)
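
[ Illustration, not part of the patch: the standalone C sketch below models
  the dispatch shape the changelog describes. It is a toy userspace program;
  struct x86_pmu_ops, the printf bodies and the stub helpers are stand-ins
  rather than kernel code, but intel_pmu_set_period()/intel_pmu_update()
  follow the same pattern as the functions added by this patch: handle the
  topdown special case in the Intel driver and fall back to the generic x86
  helpers for everything else. ]

/* Standalone sketch; build with: cc -Wall sketch.c */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct perf_event { bool topdown; };

/* Stand-ins for the generic helpers in arch/x86/events/core.c. */
static int x86_perf_event_set_period(struct perf_event *event)
{
	printf("generic set_period\n");
	return 0;
}

static uint64_t x86_perf_event_update(struct perf_event *event)
{
	printf("generic update\n");
	return 0;
}

/* Stand-ins for the Intel topdown machinery. */
static bool is_topdown_count(struct perf_event *event)
{
	return event->topdown;
}

static int intel_set_topdown_event_period(struct perf_event *event)
{
	printf("topdown set_period\n");
	return 0;
}

static uint64_t intel_update_topdown_event(struct perf_event *event)
{
	printf("topdown update\n");
	return 0;
}

/*
 * Same shape as the intel_pmu_set_period()/intel_pmu_update() added by
 * this patch: topdown counters take the special path, everything else
 * falls back to the generic code.
 */
static int intel_pmu_set_period(struct perf_event *event)
{
	if (is_topdown_count(event))
		return intel_set_topdown_event_period(event);

	return x86_perf_event_set_period(event);
}

static uint64_t intel_pmu_update(struct perf_event *event)
{
	if (is_topdown_count(event))
		return intel_update_topdown_event(event);

	return x86_perf_event_update(event);
}

/*
 * The kernel reaches these through static_call(x86_pmu_set_period) and
 * static_call(x86_pmu_update); plain function pointers play that role
 * in this sketch.
 */
struct x86_pmu_ops {
	int      (*set_period)(struct perf_event *event);
	uint64_t (*update)(struct perf_event *event);
};

int main(void)
{
	const struct x86_pmu_ops intel_ops = {
		.set_period = intel_pmu_set_period,
		.update     = intel_pmu_update,
	};
	struct perf_event cycles = { .topdown = false };
	struct perf_event slots  = { .topdown = true };

	intel_ops.set_period(&cycles);	/* generic path */
	intel_ops.update(&slots);	/* topdown path */

	return 0;
}
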
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -119,9 +119,6 @@ u64 x86_perf_event_update(struct perf_ev
 	if (unlikely(!hwc->event_base))
 		return 0;
 
-	if (unlikely(is_topdown_count(event)) && x86_pmu.update_topdown_event)
-		return x86_pmu.update_topdown_event(event);
-
 	/*
 	 * Careful: an NMI might modify the previous event value.
 	 *
@@ -1373,10 +1370,6 @@ int x86_perf_event_set_period(struct per
 	if (unlikely(!hwc->event_base))
 		return 0;
 
-	if (unlikely(is_topdown_count(event)) &&
-	    x86_pmu.set_topdown_event_period)
-		return x86_pmu.set_topdown_event_period(event);
-
 	/*
 	 * If we are way outside a reasonable range then just skip forward:
 	 */
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2301,7 +2301,7 @@ static void intel_pmu_nhm_workaround(voi
 	for (i = 0; i < 4; i++) {
 		event = cpuc->events[i];
 		if (event)
-			x86_perf_event_update(event);
+			static_call(x86_pmu_update)(event);
 	}
 
 	for (i = 0; i < 4; i++) {
@@ -2316,7 +2316,7 @@ static void intel_pmu_nhm_workaround(voi
 		event = cpuc->events[i];
 
 		if (event) {
-			x86_perf_event_set_period(event);
+			static_call(x86_pmu_set_period)(event);
 			__x86_pmu_enable_event(&event->hw,
 					ARCH_PERFMON_EVENTSEL_ENABLE);
 		} else
@@ -2793,7 +2793,7 @@ static void intel_pmu_add_event(struct p
  */
 int intel_pmu_save_and_restart(struct perf_event *event)
 {
-	x86_perf_event_update(event);
+	static_call(x86_pmu_update)(event);
 	/*
 	 * For a checkpointed counter always reset back to 0. This
 	 * avoids a situation where the counter overflows, aborts the
@@ -2805,9 +2805,27 @@ int intel_pmu_save_and_restart(struct pe
 		wrmsrl(event->hw.event_base, 0);
 		local64_set(&event->hw.prev_count, 0);
 	}
+	return static_call(x86_pmu_set_period)(event);
+}
+
+static int intel_pmu_set_period(struct perf_event *event)
+{
+	if (unlikely(is_topdown_count(event)) &&
+	    x86_pmu.set_topdown_event_period)
+		return x86_pmu.set_topdown_event_period(event);
+
 	return x86_perf_event_set_period(event);
 }
 
+static u64 intel_pmu_update(struct perf_event *event)
+{
+	if (unlikely(is_topdown_count(event)) &&
+	    x86_pmu.update_topdown_event)
+		return x86_pmu.update_topdown_event(event);
+
+	return x86_perf_event_update(event);
+}
+
 static void intel_pmu_reset(void)
 {
 	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
@@ -4635,6 +4653,10 @@ static __initconst const struct x86_pmu
 	.enable_all		= core_pmu_enable_all,
 	.enable			= core_pmu_enable_event,
 	.disable		= x86_pmu_disable_event,
+
+	.set_period		= intel_pmu_set_period,
+	.update			= intel_pmu_update,
+
 	.hw_config		= core_pmu_hw_config,
 	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,