Message-ID: <20220511142345.084235472@infradead.org>
Date: Wed, 11 May 2022 16:20:38 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: x86@...nel.org, kan.liang@...ux.intel.com, eranian@...gle.com
Cc: linux-kernel@...r.kernel.org, peterz@...radead.org,
acme@...nel.org, mark.rutland@....com,
alexander.shishkin@...ux.intel.com, jolsa@...nel.org,
namhyung@...nel.org
Subject: [PATCH 1/5] perf/x86/amd: Fix AMD BRS period adjustment
There are two problems with the current amd_brs_adjust_period() code:
- it isn't in fact AMD specific and will always adjust the period;
- it adjusts the period, while it should only adjust the event count,
  resulting in reporting a short period.
Fix this by using x86_pmu.limit_period; this makes it specific to the
AMD BRS case and ensures only the event count is adjusted while the
reported period is left unmodified.
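For illustration only (not part of the patch; BRS_DEPTH stands in for
x86_pmu.lbr_nr, assumed 16 here, and the has_branch_stack() check is
dropped), the intended semantics boil down to:

	#include <stdio.h>
	#include <inttypes.h>

	/* Stand-in for x86_pmu.lbr_nr; assumed 16 for this sketch. */
	#define BRS_DEPTH	16

	/*
	 * Same shape as the new amd_pmu_limit_period(): shorten what is
	 * programmed into the counter, never the period that is reported.
	 */
	static uint64_t limit_period(uint64_t left)
	{
		if (left > BRS_DEPTH)
			left -= BRS_DEPTH;
		return left;
	}

	int main(void)
	{
		uint64_t period = 100000;	/* sample_period from the user */

		printf("programmed count: %" PRIu64 "\n", limit_period(period));
		printf("reported period:  %" PRIu64 "\n", period);
		return 0;
	}

That is, with a requested period of 100000 the counter is programmed
with 99984 so the last N taken branches fall inside the window, but the
sample record still reports the full 100000; previously both came out
as 99984.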
Fixes: ba2fe7500845 ("perf/x86/amd: Add AMD branch sampling period adjustment")
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
arch/x86/events/amd/core.c | 13 +++++++++++++
arch/x86/events/core.c | 7 -------
arch/x86/events/perf_event.h | 18 ------------------
3 files changed, 13 insertions(+), 25 deletions(-)
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -1258,6 +1258,18 @@ static void amd_pmu_sched_task(struct pe
amd_pmu_brs_sched_task(ctx, sched_in);
}
+static u64 amd_pmu_limit_period(struct perf_event *event, u64 left)
+{
+ /*
+ * Decrease period by the depth of the BRS feature to get the last N
+ * taken branches and approximate the desired period
+ */
+ if (has_branch_stack(event) && left > x86_pmu.lbr_nr)
+ left -= x86_pmu.lbr_nr;
+
+ return left;
+}
+
static __initconst const struct x86_pmu amd_pmu = {
.name = "AMD",
.handle_irq = amd_pmu_handle_irq,
@@ -1418,6 +1430,7 @@ static int __init amd_core_pmu_init(void
if (boot_cpu_data.x86 >= 0x19 && !amd_brs_init()) {
x86_pmu.get_event_constraints = amd_get_event_constraints_f19h;
x86_pmu.sched_task = amd_pmu_sched_task;
+ x86_pmu.limit_period = amd_pmu_limit_period;
/*
* put_event_constraints callback same as Fam17h, set above
*/
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1375,13 +1375,6 @@ int x86_perf_event_set_period(struct per
return x86_pmu.set_topdown_event_period(event);
/*
- * decrease period by the depth of the BRS feature to get
- * the last N taken branches and approximate the desired period
- */
- if (has_branch_stack(event))
- period = amd_brs_adjust_period(period);
-
- /*
* If we are way outside a reasonable range then just skip forward:
*/
if (unlikely(left <= -period)) {
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1254,14 +1254,6 @@ static inline void amd_pmu_brs_del(struc
}
void amd_pmu_brs_sched_task(struct perf_event_context *ctx, bool sched_in);
-
-static inline s64 amd_brs_adjust_period(s64 period)
-{
- if (period > x86_pmu.lbr_nr)
- return period - x86_pmu.lbr_nr;
-
- return period;
-}
#else
static inline int amd_brs_init(void)
{
@@ -1290,11 +1282,6 @@ static inline void amd_pmu_brs_sched_tas
{
}
-static inline s64 amd_brs_adjust_period(s64 period)
-{
- return period;
-}
-
static inline void amd_brs_enable_all(void)
{
}
@@ -1324,11 +1311,6 @@ static inline void amd_brs_enable_all(vo
static inline void amd_brs_disable_all(void)
{
}
-
-static inline s64 amd_brs_adjust_period(s64 period)
-{
- return period;
-}
#endif /* CONFIG_CPU_SUP_AMD */
static inline int is_pebs_pt(struct perf_event *event)