Message-ID: <20260102150032.53106-5-leon.hwang@linux.dev>
Date: Fri,  2 Jan 2026 23:00:32 +0800
From: Leon Hwang <leon.hwang@...ux.dev>
To: bpf@...r.kernel.org
Cc: Alexei Starovoitov <ast@...nel.org>,
	Daniel Borkmann <daniel@...earbox.net>,
	Andrii Nakryiko <andrii@...nel.org>,
	Martin KaFai Lau <martin.lau@...ux.dev>,
	Eduard Zingerman <eddyz87@...il.com>,
	Song Liu <song@...nel.org>,
	Yonghong Song <yonghong.song@...ux.dev>,
	John Fastabend <john.fastabend@...il.com>,
	KP Singh <kpsingh@...nel.org>,
	Stanislav Fomichev <sdf@...ichev.me>,
	Hao Luo <haoluo@...gle.com>,
	Jiri Olsa <jolsa@...nel.org>,
	Puranjay Mohan <puranjay@...nel.org>,
	Xu Kuohai <xukuohai@...weicloud.com>,
	Catalin Marinas <catalin.marinas@....com>,
	Will Deacon <will@...nel.org>,
	"David S . Miller" <davem@...emloft.net>,
	David Ahern <dsahern@...nel.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>,
	Borislav Petkov <bp@...en8.de>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	x86@...nel.org,
	"H . Peter Anvin" <hpa@...or.com>,
	Andrew Morton <akpm@...ux-foundation.org>,
	linux-arm-kernel@...ts.infradead.org,
	linux-kernel@...r.kernel.org,
	netdev@...r.kernel.org,
	kernel-patches-bot@...com,
	Leon Hwang <leon.hwang@...ux.dev>
Subject: [PATCH bpf-next 4/4] bpf, lib/test_bpf: Fix broken tailcall tests

Update the tail call tests in test_bpf to work with the new tail call
optimization, which requires (see the sketch below):
  1. A valid used_maps array pointing to the prog array
  2. Precomputed tail call targets stored in array->ptrs[max_entries + index]
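
In short, each test program's slot now has to be populated roughly as
follows. The two helpers below are a condensed, illustrative sketch of
the diff; the helper names are made up, only
bpf_arch_tail_call_prologue_offset() and the ptrs[] layout come from
this series (kernel headers <linux/bpf.h> and <linux/slab.h> assumed):

/* Before the program is JITed: wire up used_maps and max_entries. */
static int sketch_attach_prog_array(struct bpf_prog *fp,
				    struct bpf_array *progs)
{
	fp->aux->used_maps = kmalloc_array(1, sizeof(struct bpf_map *),
					   GFP_KERNEL);
	if (!fp->aux->used_maps)
		return -ENOMEM;
	fp->aux->used_maps[0] = &progs->map;
	fp->aux->used_map_cnt = 1;
	return 0;
}

/* After JIT: record the program and its precomputed entry point.
 * ptrs[] must have been allocated with 2 * max_entries slots.
 */
static void sketch_record_target(struct bpf_array *progs, u32 i,
				 struct bpf_prog *fp)
{
	u32 max_entries = progs->map.max_entries;

	progs->ptrs[i] = fp;
	progs->ptrs[max_entries + i] = (void *)fp->bpf_func +
			bpf_arch_tail_call_prologue_offset();
}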

Signed-off-by: Leon Hwang <leon.hwang@...ux.dev>
---
 lib/test_bpf.c | 39 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 34 insertions(+), 5 deletions(-)
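
Note: the sketch below shows how the upper half of ptrs[] is presumably
looked up at tail call time. This is only an illustration inferred from
this patch; the actual dispatch is implemented in the earlier patches of
the series.

/* Illustrative only; not code from this series. */
static void *sketch_tail_call_target(const struct bpf_array *array,
				     u32 index)
{
	if (index >= array->map.max_entries)
		return NULL;
	/* NULL for empty slots, otherwise bpf_func plus the
	 * architecture's tail call prologue offset.
	 */
	return array->ptrs[array->map.max_entries + index];
}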

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index af0041df2b72..680d34d46f19 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -15448,26 +15448,45 @@ static void __init destroy_tail_call_tests(struct bpf_array *progs)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++)
-		if (progs->ptrs[i])
-			bpf_prog_free(progs->ptrs[i]);
+	for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) {
+		struct bpf_prog *fp = progs->ptrs[i];
+
+		if (!fp)
+			continue;
+
+		/*
+		 * used_maps points to a fake map that lacks proper ops,
+		 * so clear it before bpf_prog_free() to keep
+		 * bpf_free_used_maps() from trying to process it.
+		 */
+		kfree(fp->aux->used_maps);
+		fp->aux->used_maps = NULL;
+		fp->aux->used_map_cnt = 0;
+		bpf_prog_free(fp);
+	}
 	kfree(progs);
 }
 
 static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
 {
+	int prologue_offset = bpf_arch_tail_call_prologue_offset();
 	int ntests = ARRAY_SIZE(tail_call_tests);
+	u32 max_entries = ntests + 1;
 	struct bpf_array *progs;
 	int which, err;
 
 	/* Allocate the table of programs to be used for tail calls */
-	progs = kzalloc(struct_size(progs, ptrs, ntests + 1), GFP_KERNEL);
+	progs = kzalloc(struct_size(progs, ptrs, max_entries * 2), GFP_KERNEL);
 	if (!progs)
 		goto out_nomem;
 
+	/* Set max_entries before JITing, as the JIT reads it */
+	progs->map.max_entries = max_entries;
+
 	/* Create all eBPF programs and populate the table */
 	for (which = 0; which < ntests; which++) {
 		struct tail_call_test *test = &tail_call_tests[which];
+		struct bpf_map *map = &progs->map;
 		struct bpf_prog *fp;
 		int len, i;
 
@@ -15487,10 +15506,16 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
 		if (!fp)
 			goto out_nomem;
 
+		fp->aux->used_maps = kmalloc_array(1, sizeof(map), GFP_KERNEL);
+		if (!fp->aux->used_maps)
+			goto out_nomem;
+
 		fp->len = len;
 		fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
 		fp->aux->stack_depth = test->stack_depth;
 		fp->aux->tail_call_reachable = test->has_tail_call;
+		fp->aux->used_maps[0] = map;
+		fp->aux->used_map_cnt = 1;
 		memcpy(fp->insnsi, test->insns, len * sizeof(struct bpf_insn));
 
 		/* Relocate runtime tail call offsets and addresses */
@@ -15548,6 +15573,10 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
 				if ((long)__bpf_call_base + insn->imm != addr)
 					*insn = BPF_JMP_A(0); /* Skip: NOP */
 				break;
+
+			case BPF_JMP | BPF_TAIL_CALL:
+				insn->imm = 0;
+				break;
 			}
 		}
 
@@ -15555,11 +15584,11 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
 		if (err)
 			goto out_err;
 
+		progs->ptrs[max_entries + which] = (void *) fp->bpf_func + prologue_offset;
 		progs->ptrs[which] = fp;
 	}
 
 	/* The last entry contains a NULL program pointer */
-	progs->map.max_entries = ntests + 1;
 	*pprogs = progs;
 	return 0;
 
-- 
2.52.0

