Message-ID: <20260126074331.815684-4-chen.dylane@linux.dev>
Date: Mon, 26 Jan 2026 15:43:31 +0800
From: Tao Chen <chen.dylane@...ux.dev>
To: peterz@...radead.org,
	mingo@...hat.com,
	acme@...nel.org,
	namhyung@...nel.org,
	mark.rutland@....com,
	alexander.shishkin@...ux.intel.com,
	jolsa@...nel.org,
	irogers@...gle.com,
	adrian.hunter@...el.com,
	kan.liang@...ux.intel.com,
	song@...nel.org,
	ast@...nel.org,
	daniel@...earbox.net,
	andrii@...nel.org,
	martin.lau@...ux.dev,
	eddyz87@...il.com,
	yonghong.song@...ux.dev,
	john.fastabend@...il.com,
	kpsingh@...nel.org,
	sdf@...ichev.me,
	haoluo@...gle.com
Cc: linux-perf-users@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	bpf@...r.kernel.org,
	Tao Chen <chen.dylane@...ux.dev>
Subject: [PATCH bpf-next v8 3/3] bpf: Hold the perf callchain entry until used completely

As Alexei noted, the entry returned by get_perf_callchain() may be
reused if a task is preempted after the BPF program enters
migrate-disable mode. perf_callchain_entries holds only a small stack
of entries per CPU, so the entry must stay held for as long as BPF
uses it:

1. get the perf callchain entry
2. BPF uses it ...
3. put the perf callchain entry

Peter also suggested that get_recursion_context() be used with
preemption disabled, so disable preemption on the BPF side.
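
A minimal sketch of the resulting hold-across-use pattern (the helper
signatures follow the diff below; the surrounding error handling is
illustrative, not part of this patch):

	preempt_disable();	/* get_recursion_context() must not migrate */
	entry = get_callchain_entry();
	preempt_enable();
	if (unlikely(!entry))
		return -EFAULT;

	/*
	 * ... BPF consumes the entry here; because it is still held, a
	 * task that preempts us cannot be handed the same slot from the
	 * small per-CPU entry stack ...
	 */

	put_callchain_entry(entry);	/* slot may be reused from here on */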

Acked-by: Yonghong Song <yonghong.song@...ux.dev>
Signed-off-by: Tao Chen <chen.dylane@...ux.dev>
---
 kernel/bpf/stackmap.c | 55 ++++++++++++++++++++++++++++++++++++-------
 1 file changed, 47 insertions(+), 8 deletions(-)

diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index e77dcdc2164..6bdee6cc05f 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -215,7 +215,9 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
 #ifdef CONFIG_STACKTRACE
 	struct perf_callchain_entry *entry;
 
+	preempt_disable();
 	entry = get_callchain_entry();
+	preempt_enable();
 
 	if (!entry)
 		return NULL;
@@ -237,14 +239,40 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
 			to[i] = (u64)(from[i]);
 	}
 
-	put_callchain_entry(entry);
-
 	return entry;
 #else /* CONFIG_STACKTRACE */
 	return NULL;
 #endif
 }
 
+static struct perf_callchain_entry *
+bpf_get_perf_callchain(struct pt_regs *regs, bool kernel, bool user, int max_stack,
+		       bool crosstask)
+{
+	struct perf_callchain_entry *entry;
+	int ret;
+
+	preempt_disable();
+	entry = get_callchain_entry();
+	preempt_enable();
+
+	if (unlikely(!entry))
+		return NULL;
+
+	ret = __get_perf_callchain(entry, regs, kernel, user, max_stack, crosstask, false, 0);
+	if (ret) {
+		put_callchain_entry(entry);
+		return NULL;
+	}
+
+	return entry;
+}
+
+static void bpf_put_perf_callchain(struct perf_callchain_entry *entry)
+{
+	put_callchain_entry(entry);
+}
+
 static long __bpf_get_stackid(struct bpf_map *map,
 			      struct perf_callchain_entry *trace, u64 flags)
 {
@@ -327,20 +355,23 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	struct perf_callchain_entry *trace;
 	bool kernel = !user;
 	u32 max_depth;
+	int ret;
 
 	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
 			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
 		return -EINVAL;
 
 	max_depth = stack_map_calculate_max_depth(map->value_size, elem_size, flags);
-	trace = get_perf_callchain(regs, kernel, user, max_depth,
-				   false, false, 0);
+	trace = bpf_get_perf_callchain(regs, kernel, user, max_depth, false);
 
 	if (unlikely(!trace))
 		/* couldn't fetch the stack trace */
 		return -EFAULT;
 
-	return __bpf_get_stackid(map, trace, flags);
+	ret = __bpf_get_stackid(map, trace, flags);
+	bpf_put_perf_callchain(trace);
+
+	return ret;
 }
 
 const struct bpf_func_proto bpf_get_stackid_proto = {
@@ -468,13 +499,19 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	} else if (kernel && task) {
 		trace = get_callchain_entry_for_task(task, max_depth);
 	} else {
-		trace = get_perf_callchain(regs, kernel, user, max_depth,
-					   crosstask, false, 0);
+		trace = bpf_get_perf_callchain(regs, kernel, user, max_depth, crosstask);
 	}
 
-	if (unlikely(!trace) || trace->nr < skip) {
+	if (unlikely(!trace)) {
+		if (may_fault)
+			rcu_read_unlock();
+		goto err_fault;
+	}
+	if (trace->nr < skip) {
 		if (may_fault)
 			rcu_read_unlock();
+		if (!trace_in)
+			bpf_put_perf_callchain(trace);
 		goto err_fault;
 	}
 
@@ -495,6 +532,8 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	/* trace/ips should not be dereferenced after this point */
 	if (may_fault)
 		rcu_read_unlock();
+	if (!trace_in)
+		bpf_put_perf_callchain(trace);
 
 	if (user_build_id)
 		stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);
-- 
2.48.1

