Message-ID: <20260126074331.815684-3-chen.dylane@linux.dev>
Date: Mon, 26 Jan 2026 15:43:30 +0800
From: Tao Chen <chen.dylane@...ux.dev>
To: peterz@...radead.org,
	mingo@...hat.com,
	acme@...nel.org,
	namhyung@...nel.org,
	mark.rutland@....com,
	alexander.shishkin@...ux.intel.com,
	jolsa@...nel.org,
	irogers@...gle.com,
	adrian.hunter@...el.com,
	kan.liang@...ux.intel.com,
	song@...nel.org,
	ast@...nel.org,
	daniel@...earbox.net,
	andrii@...nel.org,
	martin.lau@...ux.dev,
	eddyz87@...il.com,
	yonghong.song@...ux.dev,
	john.fastabend@...il.com,
	kpsingh@...nel.org,
	sdf@...ichev.me,
	haoluo@...gle.com
Cc: linux-perf-users@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	bpf@...r.kernel.org,
	Tao Chen <chen.dylane@...ux.dev>
Subject: [PATCH bpf-next v8 2/3] perf: Refactor get_perf_callchain

From the BPF stack map, we want to ensure that the callchain buffer
will not be overwritten by other preempting tasks, and we also aim to
keep the preempt-disable interval short. Based on the suggestions from
Peter and Andrii, export a new API, __get_perf_callchain(); the usage
from the BPF side is as follows:

preempt_disable()
entry = get_callchain_entry()
preempt_enable()
__get_perf_callchain(entry)
put_callchain_entry(entry)
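
For illustration only (not part of this patch), a BPF-side caller
following that pattern could look roughly like the sketch below,
assuming the declarations from <linux/perf_event.h> above; the function
name, buffer handling and flag values are made up here, and the real
stack map code may differ:

static int copy_callchain_to_buf(struct pt_regs *regs, u64 *buf,
				 u32 max_stack)
{
	struct perf_callchain_entry *entry;
	int ret;

	/* Preemption only needs to be disabled around the entry lookup. */
	preempt_disable();
	entry = get_callchain_entry();
	preempt_enable();
	if (!entry)
		return -EBUSY;

	/* The unwind itself now runs with preemption enabled. */
	ret = __get_perf_callchain(entry, regs, true, true, max_stack,
				   false, false, 0);
	if (!ret)
		memcpy(buf, entry->ip, entry->nr * sizeof(u64));

	/* Release the per-CPU entry once its contents have been copied. */
	put_callchain_entry(entry);

	return ret;
}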

Suggested-by: Andrii Nakryiko <andrii@...nel.org>
Signed-off-by: Tao Chen <chen.dylane@...ux.dev>
---
 include/linux/perf_event.h |  5 +++++
 kernel/events/callchain.c  | 34 ++++++++++++++++++++++------------
 2 files changed, 27 insertions(+), 12 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f0489843ebc..7132fa97bb1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1722,6 +1722,11 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
 		   u32 max_stack, bool crosstask, bool add_mark, u64 defer_cookie);
+
+extern int __get_perf_callchain(struct perf_callchain_entry *entry, struct pt_regs *regs,
+				bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark,
+				u64 defer_cookie);
+
 extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);
 extern struct perf_callchain_entry *get_callchain_entry(void);
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 6cdbc5937b1..f9789d10fa4 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -221,21 +221,15 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
 #endif
 }
 
-struct perf_callchain_entry *
-get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
-		   u32 max_stack, bool crosstask, bool add_mark, u64 defer_cookie)
+int __get_perf_callchain(struct perf_callchain_entry *entry, struct pt_regs *regs, bool kernel,
+			 bool user, u32 max_stack, bool crosstask, bool add_mark, u64 defer_cookie)
 {
-	struct perf_callchain_entry *entry;
 	struct perf_callchain_entry_ctx ctx;
 	int start_entry_idx;
 
 	/* crosstask is not supported for user stacks */
 	if (crosstask && user && !kernel)
-		return NULL;
-
-	entry = get_callchain_entry();
-	if (!entry)
-		return NULL;
+		return -EINVAL;
 
 	ctx.entry		= entry;
 	ctx.max_stack		= max_stack;
@@ -252,7 +246,7 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
 	if (user && !crosstask) {
 		if (!user_mode(regs)) {
 			if (current->flags & (PF_KTHREAD | PF_USER_WORKER))
-				goto exit_put;
+				return 0;
 			regs = task_pt_regs(current);
 		}
 
@@ -265,7 +259,7 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
 			 */
 			perf_callchain_store_context(&ctx, PERF_CONTEXT_USER_DEFERRED);
 			perf_callchain_store_context(&ctx, defer_cookie);
-			goto exit_put;
+			return 0;
 		}
 
 		if (add_mark)
@@ -275,9 +269,25 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
 		perf_callchain_user(&ctx, regs);
 		fixup_uretprobe_trampoline_entries(entry, start_entry_idx);
 	}
+	return 0;
+}
+
+struct perf_callchain_entry *
+get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
+		   u32 max_stack, bool crosstask, bool add_mark, u64 defer_cookie)
+{
+	struct perf_callchain_entry *entry;
+	int ret;
+
+	entry = get_callchain_entry();
+	if (!entry)
+		return NULL;
 
-exit_put:
+	ret = __get_perf_callchain(entry, regs, kernel, user, max_stack, crosstask, add_mark,
+				   defer_cookie);
 	put_callchain_entry(entry);
+	if (ret)
+		entry = NULL;
 
 	return entry;
 }
-- 
2.48.1

