Message-ID: <20260109153420.32181-3-leon.hwang@linux.dev>
Date: Fri,  9 Jan 2026 23:34:19 +0800
From: Leon Hwang <leon.hwang@...ux.dev>
To: bpf@...r.kernel.org
Cc: Alexei Starovoitov <ast@...nel.org>,
	Daniel Borkmann <daniel@...earbox.net>,
	Andrii Nakryiko <andrii@...nel.org>,
	Martin KaFai Lau <martin.lau@...ux.dev>,
	Eduard Zingerman <eddyz87@...il.com>,
	Song Liu <song@...nel.org>,
	Yonghong Song <yonghong.song@...ux.dev>,
	John Fastabend <john.fastabend@...il.com>,
	KP Singh <kpsingh@...nel.org>,
	Stanislav Fomichev <sdf@...ichev.me>,
	Hao Luo <haoluo@...gle.com>,
	Jiri Olsa <jolsa@...nel.org>,
	"David S . Miller" <davem@...emloft.net>,
	David Ahern <dsahern@...nel.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>,
	Borislav Petkov <bp@...en8.de>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	x86@...nel.org,
	"H . Peter Anvin" <hpa@...or.com>,
	Matt Bobrowski <mattbobrowski@...gle.com>,
	Steven Rostedt <rostedt@...dmis.org>,
	Masami Hiramatsu <mhiramat@...nel.org>,
	Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
	Shuah Khan <shuah@...nel.org>,
	Leon Hwang <leon.hwang@...ux.dev>,
	netdev@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	linux-trace-kernel@...r.kernel.org,
	linux-kselftest@...r.kernel.org,
	kernel-patches-bot@...com
Subject: [PATCH bpf-next 2/3] bpf: Introduce BPF_BRANCH_SNAPSHOT_F_COPY flag for bpf_get_branch_snapshot helper

Introduce the BPF_BRANCH_SNAPSHOT_F_COPY flag, which lets tracing programs
copy branch entries from the per-CPU *bpf_branch_snapshot* buffer.

Instead of introducing a new kfunc, extend the existing
bpf_get_branch_snapshot helper to support the BPF_BRANCH_SNAPSHOT_F_COPY
flag.

When BPF_BRANCH_SNAPSHOT_F_COPY is specified:

* Validate the *flags* value in the verifier's 'check_helper_call()'.
* Skip inlining the 'bpf_get_branch_snapshot()' helper in the verifier's
  'do_misc_fixups()'.
* 'memcpy()' the branch entries in the 'bpf_get_branch_snapshot()' helper
  instead of taking a fresh snapshot.
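
For illustration, a fentry program could use the new flag as in the
following sketch. This snippet is not part of the series: the attach
target, the buffer size, and the local definition of the flag value
(this patch adds the enum to include/linux/bpf.h, not to the UAPI
header) are assumptions made for the example.

  /*
   * Illustrative sketch only, not part of this series. The attach
   * target and buffer size are arbitrary, and the flag value is
   * redefined locally because this patch adds the enum to
   * include/linux/bpf.h rather than to the UAPI header.
   */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  #define BPF_BRANCH_SNAPSHOT_F_COPY 1	/* assumed local copy */

  struct perf_branch_entry entries[32];
  long snapshot_bytes;

  SEC("fentry/do_nanosleep")
  int BPF_PROG(copy_branch_snapshot)
  {
  	/* Copy the branch entries recorded in the per-CPU
  	 * bpf_branch_snapshot buffer instead of taking a fresh
  	 * snapshot at this call site. The flag must be a verifier-known
  	 * constant, per the is_reg_const() check in check_helper_call().
  	 */
  	snapshot_bytes = bpf_get_branch_snapshot(entries, sizeof(entries),
  						 BPF_BRANCH_SNAPSHOT_F_COPY);
  	return 0;
  }

  char _license[] SEC("license") = "GPL";

On success the helper returns the number of bytes written, so the number
of valid entries is the return value divided by
sizeof(struct perf_branch_entry) (24 bytes, as the BUILD_BUG_ON in
'do_misc_fixups()' asserts); it returns -ENOENT if no entries were
recorded and -EINVAL on invalid flags.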

Signed-off-by: Leon Hwang <leon.hwang@...ux.dev>
---
 include/linux/bpf.h          |  4 ++++
 include/linux/bpf_verifier.h |  1 +
 kernel/bpf/verifier.c        | 30 ++++++++++++++++++++++++++++++
 kernel/trace/bpf_trace.c     | 17 ++++++++++++++---
 4 files changed, 49 insertions(+), 3 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 16dc21836a06..71ce225e5160 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1249,6 +1249,10 @@ struct bpf_tramp_branch_entries {
 DECLARE_PER_CPU(struct bpf_tramp_branch_entries, bpf_branch_snapshot);
 #endif

+enum {
+	BPF_BRANCH_SNAPSHOT_F_COPY	= 1,	/* Copy branch snapshot from bpf_branch_snapshot. */
+};
+
 /* Different use cases for BPF trampoline:
  * 1. replace nop at the function entry (kprobe equivalent)
  *    flags = BPF_TRAMP_F_RESTORE_REGS
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 130bcbd66f60..c60a145e0466 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -561,6 +561,7 @@ struct bpf_insn_aux_data {
 	bool non_sleepable; /* helper/kfunc may be called from non-sleepable context */
 	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
 	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
+	bool copy_branch_snapshot; /* BPF_BRANCH_SNAPSHOT_F_COPY for bpf_get_branch_snapshot helper */
 	u8 alu_state; /* used in combination with alu_limit */
 	/* true if STX or LDX instruction is a part of a spill/fill
 	 * pattern for a bpf_fastcall call.
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 53635ea2e41b..0a537f9c2f8c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -11772,6 +11772,33 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
 					 set_user_ringbuf_callback_state);
 		break;
+	case BPF_FUNC_get_branch_snapshot:
+	{
+		u64 flags;
+
+		if (!is_reg_const(&regs[BPF_REG_3], false)) {
+			verbose(env, "Flags in bpf_get_branch_snapshot helper must be const.\n");
+			return -EINVAL;
+		}
+		flags = reg_const_value(&regs[BPF_REG_3], false);
+		if (flags & ~BPF_BRANCH_SNAPSHOT_F_COPY) {
+			verbose(env, "Invalid flags in bpf_get_branch_snapshot helper.\n");
+			return -EINVAL;
+		}
+
+		if (flags & BPF_BRANCH_SNAPSHOT_F_COPY) {
+			if (env->prog->type != BPF_PROG_TYPE_TRACING ||
+			    (env->prog->expected_attach_type != BPF_TRACE_FENTRY &&
+			     env->prog->expected_attach_type != BPF_TRACE_FEXIT)) {
+				verbose(env, "Only fentry and fexit programs support BPF_BRANCH_SNAPSHOT_F_COPY.\n");
+				return -EINVAL;
+			}
+
+			env->insn_aux_data[insn_idx].copy_branch_snapshot = true;
+			env->prog->copy_branch_snapshot = true;
+		}
+		break;
+	}
 	}

 	if (err)
@@ -23370,6 +23397,9 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			 */
 			BUILD_BUG_ON(br_entry_size != 24);

+			if (env->insn_aux_data[i + delta].copy_branch_snapshot)
+				goto patch_call_imm;
+
 			/* if (unlikely(flags)) return -EINVAL */
 			insn_buf[0] = BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 0, 7);

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 6e076485bf70..e9e1698cf608 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1172,10 +1172,20 @@ BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
 	u32 entry_cnt = size / br_entry_size;

-	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
-
-	if (unlikely(flags))
+	if (likely(!flags)) {
+		entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
+#ifdef CONFIG_X86_64
+	} else if (flags & BPF_BRANCH_SNAPSHOT_F_COPY) {
+		struct bpf_tramp_branch_entries *br;
+
+		br = this_cpu_ptr(&bpf_branch_snapshot);
+		entry_cnt = min_t(u32, entry_cnt, br->cnt);
+		if (entry_cnt)
+			memcpy(buf, (void *) br->entries, entry_cnt * br_entry_size);
+#endif
+	} else {
 		return -EINVAL;
+	}

 	if (!entry_cnt)
 		return -ENOENT;
@@ -1189,6 +1199,7 @@ const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
+	.arg3_type	= ARG_ANYTHING,
 };

 BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
--
2.52.0

