Message-ID: <165282567475.4207.12287002853279789468.tip-bot2@tip-bot2>
Date:   Tue, 17 May 2022 22:14:34 -0000
From:   "tip-bot2 for Peter Zijlstra" <tip-bot2@...utronix.de>
To:     linux-tip-commits@...r.kernel.org
Cc:     "Peter Zijlstra (Intel)" <peterz@...radead.org>, x86@...nel.org,
        linux-kernel@...r.kernel.org
Subject: [tip: perf/core] perf/x86/amd: Fix AMD BRS period adjustment

The following commit has been merged into the perf/core branch of tip:

Commit-ID:     3c27b0c6ea48bc61492a138c410e262735d660ab
Gitweb:        https://git.kernel.org/tip/3c27b0c6ea48bc61492a138c410e262735d660ab
Author:        Peter Zijlstra <peterz@...radead.org>
AuthorDate:    Tue, 10 May 2022 21:22:04 +02:00
Committer:     Peter Zijlstra <peterz@...radead.org>
CommitterDate: Wed, 18 May 2022 00:08:25 +02:00

perf/x86/amd: Fix AMD BRS period adjustment

There are two problems with the current amd_brs_adjust_period() code:

 - it isn't in fact AMD specific and will always adjust the period;

 - it adjusts the period, while it should only adjust the event count,
   resulting in reporting a short period.

Fix this by using x86_pmu.limit_period; this makes it specific to the
AMD BRS case and ensures only the event count is adjusted while the
reported period is unmodified.
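
For illustration, here is a small user-space C model of the fixed
behaviour (not kernel code; BRS_DEPTH and the names below are invented
for this sketch): a limit_period-style hook shortens only the count
programmed into the counter, while the period reported back to user
space stays at the requested value.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for x86_pmu.lbr_nr, the BRS depth. */
#define BRS_DEPTH 16

/* Model of the limit_period hook: clamp only the programmed count. */
static uint64_t limit_period(uint64_t left)
{
	if (left > BRS_DEPTH)
		left -= BRS_DEPTH;
	return left;
}

int main(void)
{
	uint64_t sample_period = 100000;	/* what user space asked for */
	uint64_t left = limit_period(sample_period); /* programmed count */

	/*
	 * The counter fires BRS_DEPTH events early so the last N taken
	 * branches fall inside the window, but the period reported in
	 * the sample is still the full, unmodified sample_period.
	 */
	printf("programmed count: %llu\n", (unsigned long long)left);
	printf("reported period:  %llu\n", (unsigned long long)sample_period);
	return 0;
}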

Fixes: ba2fe7500845 ("perf/x86/amd: Add AMD branch sampling period adjustment")
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 arch/x86/events/amd/core.c   | 13 +++++++++++++
 arch/x86/events/core.c       |  7 -------
 arch/x86/events/perf_event.h | 18 ------------------
 3 files changed, 13 insertions(+), 25 deletions(-)

diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index d81eac2..3eee59c 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -1255,6 +1255,18 @@ static void amd_pmu_sched_task(struct perf_event_context *ctx,
 		amd_pmu_brs_sched_task(ctx, sched_in);
 }
 
+static u64 amd_pmu_limit_period(struct perf_event *event, u64 left)
+{
+	/*
+	 * Decrease period by the depth of the BRS feature to get the last N
+	 * taken branches and approximate the desired period
+	 */
+	if (has_branch_stack(event) && left > x86_pmu.lbr_nr)
+		left -= x86_pmu.lbr_nr;
+
+	return left;
+}
+
 static __initconst const struct x86_pmu amd_pmu = {
 	.name			= "AMD",
 	.handle_irq		= amd_pmu_handle_irq,
@@ -1415,6 +1427,7 @@ static int __init amd_core_pmu_init(void)
 	if (boot_cpu_data.x86 >= 0x19 && !amd_brs_init()) {
 		x86_pmu.get_event_constraints = amd_get_event_constraints_f19h;
 		x86_pmu.sched_task = amd_pmu_sched_task;
+		x86_pmu.limit_period = amd_pmu_limit_period;
 		/*
 		 * put_event_constraints callback same as Fam17h, set above
 		 */
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index b08052b..3078889 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1375,13 +1375,6 @@ int x86_perf_event_set_period(struct perf_event *event)
 		return x86_pmu.set_topdown_event_period(event);
 
 	/*
-	 * decrease period by the depth of the BRS feature to get
-	 * the last N taken branches and approximate the desired period
-	 */
-	if (has_branch_stack(event))
-		period = amd_brs_adjust_period(period);
-
-	/*
 	 * If we are way outside a reasonable range then just skip forward:
 	 */
 	if (unlikely(left <= -period)) {
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 3b03245..21a5482 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1254,14 +1254,6 @@ static inline void amd_pmu_brs_del(struct perf_event *event)
 }
 
 void amd_pmu_brs_sched_task(struct perf_event_context *ctx, bool sched_in);
-
-static inline s64 amd_brs_adjust_period(s64 period)
-{
-	if (period > x86_pmu.lbr_nr)
-		return period - x86_pmu.lbr_nr;
-
-	return period;
-}
 #else
 static inline int amd_brs_init(void)
 {
@@ -1290,11 +1282,6 @@ static inline void amd_pmu_brs_sched_task(struct perf_event_context *ctx, bool s
 {
 }
 
-static inline s64 amd_brs_adjust_period(s64 period)
-{
-	return period;
-}
-
 static inline void amd_brs_enable_all(void)
 {
 }
@@ -1324,11 +1311,6 @@ static inline void amd_brs_enable_all(void)
 static inline void amd_brs_disable_all(void)
 {
 }
-
-static inline s64 amd_brs_adjust_period(s64 period)
-{
-	return period;
-}
 #endif /* CONFIG_CPU_SUP_AMD */
 
 static inline int is_pebs_pt(struct perf_event *event)
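
For context (paraphrased from the generic x86 code of this era, not
part of this patch): x86_perf_event_set_period() invokes the optional
limit_period callback after clamping to max_period, which is the hook
this patch reuses for BRS. Roughly:

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	if (x86_pmu.limit_period)
		left = x86_pmu.limit_period(event, left);

Because only "left" (the value written to the counter) is reduced, the
sample_period recorded in the event and reported to user space is no
longer shortened by the BRS depth.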
