Message-ID: <20241210093449.1662-9-ravi.bangoria@amd.com>
Date: Tue, 10 Dec 2024 09:34:47 +0000
From: Ravi Bangoria <ravi.bangoria@....com>
To: <peterz@...radead.org>, <mingo@...hat.com>, <namhyung@...nel.org>
CC: <ravi.bangoria@....com>, <acme@...nel.org>, <eranian@...gle.com>,
	<mark.rutland@....com>, <alexander.shishkin@...ux.intel.com>,
	<jolsa@...nel.org>, <irogers@...gle.com>, <adrian.hunter@...el.com>,
	<kan.liang@...ux.intel.com>, <tglx@...utronix.de>, <bp@...en8.de>,
	<dave.hansen@...ux.intel.com>, <x86@...nel.org>, <hpa@...or.com>,
	<linux-perf-users@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
	<santosh.shukla@....com>, <ananth.narayan@....com>, <sandipan.das@....com>
Subject: [PATCH v3 08/10] perf/core: Introduce pmu->adjust_period() callback

Many hardware PMUs have constraints on the sample period. For example,
the minimum supported sample period for the IBS Op PMU is 0x90, and the
sample period must be a multiple of 0x10 for both IBS Fetch and IBS Op.
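
As a worked illustration of the arithmetic (a standalone userspace
sketch; the ibs_constrain_period() helper is hypothetical, only the
0x10 alignment and 0x90 minimum come from the constraints above):

	#include <stdio.h>
	#include <stdint.h>

	/* Align the period down to a multiple of 0x10, then clamp it
	 * to the IBS Op minimum of 0x90. */
	static uint64_t ibs_constrain_period(uint64_t period)
	{
		period &= ~0xFULL;
		return period < 0x90 ? 0x90 : period;
	}

	int main(void)
	{
		/* 0x95 -> 0x90 (aligned down), 0x20 -> 0x90 (clamped up),
		 * 0xa8 -> 0xa0 (aligned down, already above the minimum). */
		printf("0x%llx\n", (unsigned long long)ibs_constrain_period(0x95));
		printf("0x%llx\n", (unsigned long long)ibs_constrain_period(0x20));
		printf("0x%llx\n", (unsigned long long)ibs_constrain_period(0xa8));
		return 0;
	}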

Add an optional adjust_period() callback to struct pmu so that
PMU-specific drivers can adjust the sample period calculated by the
generic code. This ensures the sample_period value is always valid and
that no additional code is required in PMU-specific drivers to
re-adjust the period.
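
Since perf_pmu_register() installs a nop callback when a driver leaves
adjust_period NULL (see the kernel/events/core.c hunk below), the core
can invoke pmu->adjust_period() unconditionally. A driver with period
constraints only needs to supply the hook, along the lines of this
sketch (the mypmu_* names and the "even, at least 64" constraint are
invented for illustration):

	static u64 mypmu_adjust_period(struct perf_event *event, u64 period)
	{
		/* Hypothetical constraint: even period, at least 64. */
		period &= ~1ULL;
		return period < 64 ? 64 : period;
	}

	static struct pmu mypmu = {
		/* ... usual callbacks: add, del, start, stop, read ... */
		.adjust_period	= mypmu_adjust_period,	/* optional */
	};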

Acked-by: Namhyung Kim <namhyung@...nel.org>
Signed-off-by: Ravi Bangoria <ravi.bangoria@....com>
---
 arch/x86/events/amd/ibs.c  | 11 +++++++++++
 include/linux/perf_event.h |  5 +++++
 kernel/events/core.c       | 12 ++++++++++--
 3 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index aea893a971b6..db6dc7b231e2 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -586,6 +586,15 @@ static int perf_ibs_check_period(struct perf_event *event, u64 value)
 	return 0;
 }
 
+static u64 perf_ibs_adjust_period(struct perf_event *event, u64 period)
+{
+	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
+
+	period &= ~0xFULL;
+
+	return period < perf_ibs->min_period ? perf_ibs->min_period : period;
+}
+
 /*
  * We need to initialize with empty group if all attributes in the
  * group are dynamic.
@@ -719,6 +728,7 @@ static struct perf_ibs perf_ibs_fetch = {
 		.stop		= perf_ibs_stop,
 		.read		= perf_ibs_read,
 		.check_period	= perf_ibs_check_period,
+		.adjust_period	= perf_ibs_adjust_period,
 	},
 	.msr			= MSR_AMD64_IBSFETCHCTL,
 	.config_mask		= IBS_FETCH_MAX_CNT | IBS_FETCH_RAND_EN,
@@ -744,6 +754,7 @@ static struct perf_ibs perf_ibs_op = {
 		.stop		= perf_ibs_stop,
 		.read		= perf_ibs_read,
 		.check_period	= perf_ibs_check_period,
+		.adjust_period	= perf_ibs_adjust_period,
 	},
 	.msr			= MSR_AMD64_IBSOPCTL,
 	.config_mask		= IBS_OP_MAX_CNT,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8333f132f4a9..4dcc51f5d2b6 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -582,6 +582,11 @@ struct pmu {
 	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
 	 */
 	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
+
+	/*
+	 * Adjust period value according to PMU constraints.
+	 */
+	u64 (*adjust_period)		(struct perf_event *event, u64 period); /* optional */
 };
 
 enum perf_addr_filter_action_t {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b2bc67791f84..e71aded67ce6 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4192,9 +4192,9 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bo
 	if (!sample_period)
 		sample_period = 1;
 
-	hwc->sample_period = sample_period;
+	hwc->sample_period = event->pmu->adjust_period(event, sample_period);
 
-	if (local64_read(&hwc->period_left) > 8*sample_period) {
+	if (local64_read(&hwc->period_left) > 8*hwc->sample_period) {
 		if (disable)
 			event->pmu->stop(event, PERF_EF_UPDATE);
 
@@ -11519,6 +11519,11 @@ static int perf_event_nop_int(struct perf_event *event, u64 value)
 	return 0;
 }
 
+static u64 perf_pmu_nop_adjust_period(struct perf_event *event, u64 period)
+{
+	return period;
+}
+
 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -11856,6 +11861,9 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
 	if (!pmu->check_period)
 		pmu->check_period = perf_event_nop_int;
 
+	if (!pmu->adjust_period)
+		pmu->adjust_period = perf_pmu_nop_adjust_period;
+
 	if (!pmu->event_idx)
 		pmu->event_idx = perf_event_idx_default;
 
-- 
2.43.0

