lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20250408171530.140858-4-mark.barnett@arm.com>
Date: Tue,  8 Apr 2025 18:15:28 +0100
From: mark.barnett@....com
To: peterz@...radead.org,
	mingo@...hat.com,
	acme@...nel.org,
	namhyung@...nel.org,
	irogers@...gle.com
Cc: ben.gainey@....com,
	deepak.surti@....com,
	ak@...ux.intel.com,
	will@...nel.org,
	james.clark@....com,
	mark.rutland@....com,
	alexander.shishkin@...ux.intel.com,
	jolsa@...nel.org,
	adrian.hunter@...el.com,
	linux-perf-users@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	linux-arm-kernel@...ts.infradead.org,
	Mark Barnett <mark.barnett@....com>
Subject: [PATCH v4 3/5] perf: Allow adding fixed random jitter to the sampling period

From: Ben Gainey <ben.gainey@....com>

This change modifies the core perf overflow handler, adding some small
random jitter to each sample period when the high-frequency sample
period is in use. A new flag is added to perf_event_attr to opt into
this behaviour.

This change follows the discussion in [1], where it was recognized that
certain patterns of execution could produce biased results with a fixed
sample period.

[1] https://lore.kernel.org/linux-perf-users/Zc24eLqZycmIg3d2@tassilo/

Signed-off-by: Ben Gainey <ben.gainey@....com>
Signed-off-by: Mark Barnett <mark.barnett@....com>
---
 include/linux/perf_event.h |  1 +
 kernel/events/core.c       | 26 ++++++++++++++++++++++++++
 2 files changed, 27 insertions(+)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index be006965054e..78a6fd14b412 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -233,6 +233,7 @@ struct hw_perf_event {
 
 #define PERF_SPS_HF_ON			0x00000001
 #define PERF_SPS_HF_SAMPLE		0x00000002
+#define PERF_SPS_HF_RAND		0x00000004
 	u32				sample_period_state;
 
 	/*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5752ac7408b1..bc6991a33048 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -56,6 +56,7 @@
 #include <linux/buildid.h>
 #include <linux/task_work.h>
 #include <linux/percpu-rwsem.h>
+#include <linux/prandom.h>
 
 #include "internal.h"
 
@@ -472,6 +473,8 @@ static int perf_sample_period_ns __read_mostly	= DEFAULT_SAMPLE_PERIOD_NS;
 static int perf_sample_allowed_ns __read_mostly =
 	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
 
+static DEFINE_PER_CPU(struct rnd_state, sample_period_jitter_rnd);
+
 static void update_perf_cpu_limits(void)
 {
 	u64 tmp = perf_sample_period_ns;
@@ -10224,6 +10227,19 @@ static int __perf_event_overflow(struct perf_event *event,
 	 *
 	 * By ignoring the HF samples, we measure the actual period.
 	 */
+
+	/*
+	 * Apply optional jitter to the overall sample period
+	 */
+	if (hwc->sample_period_state & PERF_SPS_HF_RAND
+			&& !(hwc->sample_period_state & PERF_SPS_HF_SAMPLE)) {
+		struct rnd_state *state = &get_cpu_var(sample_period_jitter_rnd);
+		u64 rand_period = 1 << event->attr.hf_sample_rand;
+
+		sample_period -= rand_period / 2;
+		sample_period += prandom_u32_state(state) & (rand_period - 1);
+	}
+
 	if (hwc->sample_period_state & PERF_SPS_HF_ON) {
 		u64 hf_sample_period = event->attr.hf_sample_period;
 
@@ -12756,6 +12772,14 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	if (attr->hf_sample_period)
 		hwc->sample_period_state |= PERF_SPS_HF_ON;
 
+	if (attr->hf_sample_rand) {
+		/* high-frequency jitter is only valid with a high-freq period */
+		if (!attr->hf_sample_period)
+			return ERR_PTR(-EINVAL);
+
+		hwc->sample_period_state |= PERF_SPS_HF_RAND;
+	}
+
 	/*
 	 * Disallow uncore-task events. Similarly, disallow uncore-cgroup
 	 * events (they don't make sense as the cgroup will be different
@@ -14367,6 +14391,7 @@ static void __init perf_event_init_all_cpus(void)
 	zalloc_cpumask_var(&perf_online_pkg_mask, GFP_KERNEL);
 	zalloc_cpumask_var(&perf_online_sys_mask, GFP_KERNEL);
 
+	prandom_seed_full_state(&sample_period_jitter_rnd);
 
 	for_each_possible_cpu(cpu) {
 		swhash = &per_cpu(swevent_htable, cpu);
@@ -14384,6 +14409,7 @@ static void __init perf_event_init_all_cpus(void)
 		cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
 		cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default);
 		cpuctx->heap = cpuctx->heap_default;
+
 	}
 }
 
-- 
2.43.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ