[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260122165716.10508-3-adubey@linux.ibm.com>
Date: Thu, 22 Jan 2026 22:27:12 +0530
From: adubey@...ux.ibm.com
To: bpf@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
linux-kselftest@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: hbathini@...ux.ibm.com, sachinpb@...ux.ibm.com, venkat88@...ux.ibm.com,
andrii@...nel.org, eddyz87@...il.com, mykolal@...com, ast@...nel.org,
daniel@...earbox.net, martin.lau@...ux.dev, song@...nel.org,
yonghong.song@...ux.dev, john.fastabend@...il.com, kpsingh@...nel.org,
sdf@...ichev.me, haoluo@...gle.com, jolsa@...nel.org,
christophe.leroy@...roup.eu, naveen@...nel.org, maddy@...ux.ibm.com,
mpe@...erman.id.au, npiggin@...il.com, memxor@...il.com,
iii@...ux.ibm.com, shuah@...nel.org,
Abhishek Dubey <adubey@...ux.ibm.com>
Subject: [PATCH v3 2/6] powerpc64/bpf: Support tailcalls with subprogs
From: Abhishek Dubey <adubey@...ux.ibm.com>
Enable tailcalls in combination with subprogs by using a
referencing method. The actual tailcall count is always maintained
in the tail_call_info variable present in the frame of the main
function (also called the entry function). The tail_call_info field
in the stack frame of subprogs contains a reference to the
tail_call_info field in the stack frame of the main BPF program.
Dynamic resolution interprets the tail_call_info either as a
value or as a reference, depending on the context of the active
frame when the tailcall is invoked.
Signed-off-by: Abhishek Dubey <adubey@...ux.ibm.com>
---
arch/powerpc/net/bpf_jit.h | 13 ++++++
arch/powerpc/net/bpf_jit_comp.c | 59 +++++++++++++++++++++++----
arch/powerpc/net/bpf_jit_comp64.c | 68 +++++++++++++++++++++++--------
3 files changed, 117 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 9f6ec00bd02e..56f56fdd4969 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -52,6 +52,13 @@
EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
} while (0)
+/* When constant jump offset is known prior */
+#define PPC_BCC_CONST_SHORT(cond, offset) \
+ do { \
+ BUILD_BUG_ON(offset < -0x8000 || offset > 0x7fff || (offset & 0x3)); \
+ EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
+ } while (0)
+
/*
* Sign-extended 32-bit immediate load
*
@@ -73,6 +80,10 @@
} } while (0)
#ifdef CONFIG_PPC64
+
+/* for gpr non volatile registers BPF_REG_6 to 10 */
+#define BPF_PPC_STACK_SAVE (6 * 8)
+
/* If dummy pass (!image), account for maximum possible instructions */
#define PPC_LI64(d, i) do { \
if (!image) \
@@ -167,6 +178,7 @@ struct codegen_context {
unsigned int alt_exit_addr;
u64 arena_vm_start;
u64 user_vm_start;
+ bool is_subprog;
};
#define bpf_to_ppc(r) (ctx->b2p[r])
@@ -206,6 +218,7 @@ int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass
struct codegen_context *ctx, int insn_idx,
int jmp_off, int dst_reg, u32 code);
+int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx);
#endif
#endif
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index d5757577f933..f3ee031edc26 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -206,6 +206,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena);
cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);
+ cgctx.is_subprog = bpf_is_subprog(fp);
/* Scouting faux-generate pass 0 */
if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
@@ -435,6 +436,11 @@ void bpf_jit_free(struct bpf_prog *fp)
bpf_prog_unlock_free(fp);
}
+bool bpf_jit_supports_subprog_tailcalls(void)
+{
+ return IS_ENABLED(CONFIG_PPC64);
+}
+
bool bpf_jit_supports_kfunc_call(void)
{
return true;
@@ -600,15 +606,53 @@ static int invoke_bpf_mod_ret(u32 *image, u32 *ro_image, struct codegen_context
return 0;
}
-static void bpf_trampoline_setup_tail_call_cnt(u32 *image, struct codegen_context *ctx,
- int func_frame_offset, int r4_off)
+/*
+ * Refer to the label 'Generated stack layout' in this file for the actual
+ * stack layout during trampoline invocation.
+ *
+ * Refer to __arch_prepare_bpf_trampoline() for stack component details.
+ *
+ * The tailcall count/reference is present in the caller's stack frame. It is
+ * required to copy the content of tail_call_info before calling the actual
+ * function to which the trampoline is attached.
+ */
+static void bpf_trampoline_setup_tail_call_info(u32 *image, struct codegen_context *ctx,
+ int func_frame_offset,
+ int bpf_dummy_frame_size, int r4_off)
{
if (IS_ENABLED(CONFIG_PPC64)) {
/* See Generated stack layout */
- int tailcallcnt_offset = BPF_PPC_TAILCALL;
+ int tailcallinfo_offset = BPF_PPC_TAILCALL;
- EMIT(PPC_RAW_LL(_R3, _R1, func_frame_offset - tailcallcnt_offset));
- EMIT(PPC_RAW_STL(_R3, _R1, -tailcallcnt_offset));
+ /*
+ * func_frame_offset = ...(1)
+ * bpf_dummy_frame_size + trampoline_frame_size
+ */
+ EMIT(PPC_RAW_LD(_R4, _R1, func_frame_offset));
+ EMIT(PPC_RAW_LD(_R3, _R4, -tailcallinfo_offset));
+
+ /*
+ * Setting the tail_call_info in trampoline's frame
+ * depending on if previous frame had value or reference.
+ */
+ EMIT(PPC_RAW_CMPLWI(_R3, MAX_TAIL_CALL_CNT));
+ PPC_BCC_CONST_SHORT(COND_GT, 8);
+ EMIT(PPC_RAW_ADDI(_R3, _R4, bpf_jit_stack_tailcallinfo_offset(ctx)));
+ /*
+ * From ...(1) above:
+ * trampoline_frame_bottom = ...(2)
+ * func_frame_offset - bpf_dummy_frame_size
+ *
+ * Using ...(2) derived above:
+ * trampoline_tail_call_info_offset = ...(3)
+ * trampoline_frame_bottom - tailcallinfo_offset
+ *
+ * From ...(3):
+ * Use trampoline_tail_call_info_offset to write reference of main's
+ * tail_call_info in trampoline frame.
+ */
+ EMIT(PPC_RAW_STL(_R3, _R1, (func_frame_offset - bpf_dummy_frame_size)
+ - tailcallinfo_offset));
} else {
/* See bpf_jit_stack_offsetof() and BPF_PPC_TC */
EMIT(PPC_RAW_LL(_R4, _R1, r4_off));
@@ -714,7 +758,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
* LR save area [ r0 save (64-bit) ] | header
* [ r0 save (32-bit) ] |
* dummy frame for unwind [ back chain 1 ] --
- * [ tail_call_cnt ] optional - 64-bit powerpc
+ * [ tail_call_info ] optional - 64-bit powerpc
* [ padding ] align stack frame
* r4_off [ r4 (tailcallcnt) ] optional - 32-bit powerpc
* alt_lr_off [ real lr (ool stub)] optional - actual lr
@@ -905,7 +949,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
/* Replicate tail_call_cnt before calling the original BPF prog */
if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
- bpf_trampoline_setup_tail_call_cnt(image, ctx, func_frame_offset, r4_off);
+ bpf_trampoline_setup_tail_call_info(image, ctx, func_frame_offset,
+ bpf_dummy_frame_size, r4_off);
/* Restore args */
bpf_trampoline_restore_args_stack(image, ctx, func_frame_offset, nr_regs, regs_off);
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 296e9ea14f2e..18da5a866447 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -26,8 +26,12 @@
* Ensure the top half (upto local_tmp_var) stays consistent
* with our redzone usage.
*
+ * tail_call_info - stores the tailcall count value in the main program's
+ *                  frame; stores a reference to the tail_call_info of
+ *                  main's frame in a sub-prog's frame.
+ *
* [ prev sp ] <-------------
- * [ tail_call_cnt ] 8 |
+ * [ tail_call_info ] 8 |
* [ nv gpr save area ] 6*8 |
* [ local_tmp_var ] 24 |
* fp (r31) --> [ ebpf stack space ] upto 512 |
@@ -35,8 +39,6 @@
* sp (r1) ---> [ stack pointer ] --------------
*/
-/* for gpr non volatile registers BPG_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE (6*8)
/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS 24
/* stack frame excluding BPF stack, ensure this is quadword aligned */
@@ -98,7 +100,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
* [ prev sp ] <-------------
* [ ... ] |
* sp (r1) ---> [ stack pointer ] --------------
- * [ tail_call_cnt ] 8
+ * [ tail_call_info ] 8
* [ nv gpr save area ] 6*8
* [ local_tmp_var ] 24
* [ unused red zone ] 224
@@ -114,7 +116,7 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
}
}
-static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
+int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx)
{
return bpf_jit_stack_local(ctx) + BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE;
}
@@ -147,17 +149,32 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
#endif
/*
- * Initialize tail_call_cnt if we do tail calls.
- * Otherwise, put in NOPs so that it can be skipped when we are
- * invoked through a tail call.
+	 * The tail call count (tcc) is saved & updated only in the main
+	 * program's frame, and the address of tcc in the main program's
+	 * frame (tcc_ptr) is saved in the subprog's frame.
+ *
+ * Offset of tail_call_info on any frame will be interpreted
+ * as either tcc_ptr or tcc value depending on whether it is
+ * greater than MAX_TAIL_CALL_CNT or not.
*/
- if (ctx->seen & SEEN_TAILCALL) {
+ if (!ctx->is_subprog) {
EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
/* this goes in the redzone */
EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_TAILCALL)));
} else {
- EMIT(PPC_RAW_NOP());
- EMIT(PPC_RAW_NOP());
+ /*
+		 * if tail_call_info <= MAX_TAIL_CALL_CNT
+		 *     main prog calling first subprog -> store a reference to main's count
+		 * else
+		 *     subsequent subprog calling another subprog -> copy reference as-is
+ */
+ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), _R1, 0));
+ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), -(BPF_PPC_TAILCALL)));
+ EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
+ PPC_BCC_CONST_SHORT(COND_GT, 8);
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2),
+ -(BPF_PPC_TAILCALL)));
+ EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_TAILCALL)));
}
if (bpf_has_stack_frame(ctx)) {
@@ -352,19 +369,38 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
PPC_BCC_SHORT(COND_GE, out);
+ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
+ EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
+ PPC_BCC_CONST_SHORT(COND_LE, 8);
+
+ /* dereference TMP_REG_1 */
+ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 0));
+
/*
- * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
+ * if (tail_call_info == MAX_TAIL_CALL_CNT)
* goto out;
*/
- EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
- PPC_BCC_SHORT(COND_GE, out);
+ PPC_BCC_SHORT(COND_EQ, out);
/*
- * tail_call_cnt++;
+ * tail_call_info++; <- Actual value of tcc here
*/
EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
- EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
+
+ /*
+ * Before writing updated tail_call_info, distinguish if current frame
+ * is storing a reference to tail_call_info or actual tcc value in
+ * tail_call_info.
+ */
+ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
+ EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_2), MAX_TAIL_CALL_CNT));
+ PPC_BCC_CONST_SHORT(COND_GT, 8);
+
+ /* First get address of tail_call_info */
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
+ /* Writeback updated value to tail_call_info */
+ EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
/* prog = array->ptrs[index]; */
EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
--
2.48.1
Powered by blists - more mailing lists