lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260126074331.815684-2-chen.dylane@linux.dev>
Date: Mon, 26 Jan 2026 15:43:29 +0800
From: Tao Chen <chen.dylane@...ux.dev>
To: peterz@...radead.org,
	mingo@...hat.com,
	acme@...nel.org,
	namhyung@...nel.org,
	mark.rutland@....com,
	alexander.shishkin@...ux.intel.com,
	jolsa@...nel.org,
	irogers@...gle.com,
	adrian.hunter@...el.com,
	kan.liang@...ux.intel.com,
	song@...nel.org,
	ast@...nel.org,
	daniel@...earbox.net,
	andrii@...nel.org,
	martin.lau@...ux.dev,
	eddyz87@...il.com,
	yonghong.song@...ux.dev,
	john.fastabend@...il.com,
	kpsingh@...nel.org,
	sdf@...ichev.me,
	haoluo@...gle.com
Cc: linux-perf-users@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	bpf@...r.kernel.org,
	Tao Chen <chen.dylane@...ux.dev>
Subject: [PATCH bpf-next v8 1/3] perf: Add rctx in perf_callchain_entry

Record rctx inside the perf_callchain_entry itself, so that callers of
get_callchain_entry() no longer need to care about the assignment of
rctx. This will be used in the next patch.

Suggested-by: Andrii Nakryiko <andrii@...nel.org>
Signed-off-by: Tao Chen <chen.dylane@...ux.dev>
---
 include/linux/perf_event.h |  5 +++--
 kernel/bpf/stackmap.c      |  5 ++---
 kernel/events/callchain.c  | 27 ++++++++++++++++-----------
 3 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 9870d768db4..f0489843ebc 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -57,6 +57,7 @@
 #include <asm/local.h>
 
 struct perf_callchain_entry {
+	int				rctx;
 	u64				nr;
 	u64				ip[]; /* /proc/sys/kernel/perf_event_max_stack */
 };
@@ -1723,8 +1724,8 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
 		   u32 max_stack, bool crosstask, bool add_mark, u64 defer_cookie);
 extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);
-extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
-extern void put_callchain_entry(int rctx);
+extern struct perf_callchain_entry *get_callchain_entry(void);
+extern void put_callchain_entry(struct perf_callchain_entry *entry);
 
 extern int sysctl_perf_event_max_stack;
 extern int sysctl_perf_event_max_contexts_per_stack;
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index da3d328f5c1..e77dcdc2164 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -214,9 +214,8 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
 {
 #ifdef CONFIG_STACKTRACE
 	struct perf_callchain_entry *entry;
-	int rctx;
 
-	entry = get_callchain_entry(&rctx);
+	entry = get_callchain_entry();
 
 	if (!entry)
 		return NULL;
@@ -238,7 +237,7 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
 			to[i] = (u64)(from[i]);
 	}
 
-	put_callchain_entry(rctx);
+	put_callchain_entry(entry);
 
 	return entry;
 #else /* CONFIG_STACKTRACE */
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index b9c7e00725d..6cdbc5937b1 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -151,31 +151,36 @@ void put_callchain_buffers(void)
 	}
 }
 
-struct perf_callchain_entry *get_callchain_entry(int *rctx)
+struct perf_callchain_entry *get_callchain_entry(void)
 {
 	int cpu;
+	int rctx;
 	struct callchain_cpus_entries *entries;
+	struct perf_callchain_entry *entry;
 
-	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
-	if (*rctx == -1)
+	rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
+	if (rctx == -1)
 		return NULL;
 
 	entries = rcu_dereference(callchain_cpus_entries);
 	if (!entries) {
-		put_recursion_context(this_cpu_ptr(callchain_recursion), *rctx);
+		put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
 		return NULL;
 	}
 
 	cpu = smp_processor_id();
 
-	return (((void *)entries->cpu_entries[cpu]) +
-		(*rctx * perf_callchain_entry__sizeof()));
+	entry = ((void *)entries->cpu_entries[cpu]) +
+		(rctx * perf_callchain_entry__sizeof());
+	entry->rctx = rctx;
+
+	return entry;
 }
 
 void
-put_callchain_entry(int rctx)
+put_callchain_entry(struct perf_callchain_entry *entry)
 {
-	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
+	put_recursion_context(this_cpu_ptr(callchain_recursion), entry->rctx);
 }
 
 static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entry,
@@ -222,13 +227,13 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
 {
 	struct perf_callchain_entry *entry;
 	struct perf_callchain_entry_ctx ctx;
-	int rctx, start_entry_idx;
+	int start_entry_idx;
 
 	/* crosstask is not supported for user stacks */
 	if (crosstask && user && !kernel)
 		return NULL;
 
-	entry = get_callchain_entry(&rctx);
+	entry = get_callchain_entry();
 	if (!entry)
 		return NULL;
 
@@ -272,7 +277,7 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
 	}
 
 exit_put:
-	put_callchain_entry(rctx);
+	put_callchain_entry(entry);
 
 	return entry;
 }
-- 
2.48.1


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ