[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260102150032.53106-2-leon.hwang@linux.dev>
Date: Fri, 2 Jan 2026 23:00:29 +0800
From: Leon Hwang <leon.hwang@...ux.dev>
To: bpf@...r.kernel.org
Cc: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
Martin KaFai Lau <martin.lau@...ux.dev>,
Eduard Zingerman <eddyz87@...il.com>,
Song Liu <song@...nel.org>,
Yonghong Song <yonghong.song@...ux.dev>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>,
Stanislav Fomichev <sdf@...ichev.me>,
Hao Luo <haoluo@...gle.com>,
Jiri Olsa <jolsa@...nel.org>,
Puranjay Mohan <puranjay@...nel.org>,
Xu Kuohai <xukuohai@...weicloud.com>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
"David S . Miller" <davem@...emloft.net>,
David Ahern <dsahern@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
x86@...nel.org,
"H . Peter Anvin" <hpa@...or.com>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org,
netdev@...r.kernel.org,
kernel-patches-bot@...com,
Leon Hwang <leon.hwang@...ux.dev>
Subject: [PATCH bpf-next 1/4] bpf: tailcall: Introduce bpf_arch_tail_call_prologue_offset
Introduce bpf_arch_tail_call_prologue_offset() to allow architectures
to specify the offset from bpf_func to the actual program entry point
for tail calls. This offset accounts for prologue instructions that
should be skipped (e.g., fentry NOPs, TCC initialization).
When an architecture provides a non-zero prologue offset, prog arrays
allocate additional space to cache precomputed tail call targets:
array->ptrs[max_entries + index] = prog->bpf_func + prologue_offset
This cached target is updated atomically via xchg() when programs are
added or removed from the prog array, eliminating the need to compute
the target address at runtime during tail calls.
The function is exported for use by the test_bpf module.
Signed-off-by: Leon Hwang <leon.hwang@...ux.dev>
---
include/linux/bpf.h | 1 +
kernel/bpf/arraymap.c | 27 ++++++++++++++++++++++++++-
2 files changed, 27 insertions(+), 1 deletion(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4e7d72dfbcd4..acd85c239af9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -3792,6 +3792,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
struct bpf_prog *new, struct bpf_prog *old);
+int bpf_arch_tail_call_prologue_offset(void);
void *bpf_arch_text_copy(void *dst, void *src, size_t len);
int bpf_arch_text_invalidate(void *dst, size_t len);
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 1eeb31c5b317..beedd1281c22 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -127,6 +127,9 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
array_size += (u64) max_entries * elem_size;
}
}
+ if (attr->map_type == BPF_MAP_TYPE_PROG_ARRAY && bpf_arch_tail_call_prologue_offset())
+ /* Store tailcall targets */
+ array_size += (u64) max_entries * sizeof(void *);
/* allocate all map elements and zero-initialize them */
if (attr->map_flags & BPF_F_MMAPABLE) {
@@ -1087,16 +1090,38 @@ void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
WARN_ON_ONCE(1);
}
+/*
+ * Byte offset from a program's bpf_func to the entry point a tail call
+ * should jump to, skipping prologue instructions (per the commit message:
+ * e.g. fentry NOPs, TCC initialization).  This weak default returns 0,
+ * meaning the architecture caches no tail-call targets; arch code may
+ * override it with a non-zero offset.  Exported because the test_bpf
+ * module calls it (see commit message).
+ */
+int __weak bpf_arch_tail_call_prologue_offset(void)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bpf_arch_tail_call_prologue_offset);
+
+/*
+ * Refresh the cached tail-call target for slot @key of @array.  The cache
+ * lives in the second half of array->ptrs (index max_entries + key, space
+ * allocated in array_map_alloc when the arch reports a non-zero prologue
+ * offset).  The cached value is new->bpf_func advanced past the prologue,
+ * or NULL when the slot is being cleared (@new == NULL).  xchg() makes the
+ * pointer update atomic with respect to concurrent readers.  No-op when
+ * the arch has no prologue offset, since no cache space exists then.
+ */
+static void bpf_tail_call_target_update(struct bpf_array *array, u32 key, struct bpf_prog *new)
+{
+ int offset = bpf_arch_tail_call_prologue_offset();
+ void *target;
+
+ if (!offset)
+ return;
+
+ /* Use NULL, not the plain integer 0, for the cleared pointer slot. */
+ target = new ? (void *) new->bpf_func + offset : NULL;
+ xchg(array->ptrs + array->map.max_entries + key, target);
+}
+
static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
struct bpf_prog *old,
struct bpf_prog *new)
{
struct prog_poke_elem *elem;
struct bpf_array_aux *aux;
+ struct bpf_array *array;
- aux = container_of(map, struct bpf_array, map)->aux;
+ array = container_of(map, struct bpf_array, map);
+ aux = array->aux;
WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
+ bpf_tail_call_target_update(array, key, new);
+
list_for_each_entry(elem, &aux->poke_progs, list) {
struct bpf_jit_poke_descriptor *poke;
int i;
--
2.52.0
Powered by blists - more mailing lists