Message-ID: <493cebef-4df6-4cf8-898b-483a10889359@linux.ibm.com>
Date: Fri, 16 Jan 2026 13:18:07 +0530
From: Hari Bathini <hbathini@...ux.ibm.com>
To: adubey <adubey@...p.linux.ibm.com>
Cc: adubey@...ux.ibm.com, bpf@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
linux-kselftest@...r.kernel.org, linux-kernel@...r.kernel.org,
sachinpb@...ux.ibm.com, venkat88@...ux.ibm.com, andrii@...nel.org,
eddyz87@...il.com, mykolal@...com, ast@...nel.org,
daniel@...earbox.net, martin.lau@...ux.dev, song@...nel.org,
yonghong.song@...ux.dev, john.fastabend@...il.com, kpsingh@...nel.org,
sdf@...ichev.me, haoluo@...gle.com, jolsa@...nel.org,
christophe.leroy@...roup.eu, naveen@...nel.org, maddy@...ux.ibm.com,
mpe@...erman.id.au, npiggin@...il.com, memxor@...il.com,
iii@...ux.ibm.com, shuah@...nel.org
Subject: Re: [PATCH v2 5/6] powerpc64/bpf: Support exceptions
On 16/01/26 12:17 pm, adubey wrote:
> On 2026-01-16 12:14, adubey wrote:
>> On 2026-01-16 11:57, Hari Bathini wrote:
>>> On 14/01/26 5:14 pm, adubey@...ux.ibm.com wrote:
>>>> From: Abhishek Dubey <adubey@...ux.ibm.com>
>>>>
>>>> The modified prologue/epilogue generation code now
>>>> enables the exception callback to use the stack frame
>>>> of the program marked as the exception boundary, where
>>>> callee-saved registers are stored.
>>>>
>>>> As per the ppc64 ABIv2 documentation[1], r14-r31 are
>>>> callee-saved registers. BPF programs on ppc64 already
>>>> save r26-r31. Saving the remaining callee-saved
>>>> registers (r14-r25) is handled in the next patch.
>>>>
>>>> [1] https://ftp.rtems.org/pub/rtems/people/sebh/ABI64BitOpenPOWERv1.1_16July2015_pub.pdf
>>>>
>>>> Signed-off-by: Abhishek Dubey <adubey@...ux.ibm.com>
>>>> ---
>>>>  arch/powerpc/net/bpf_jit.h        |  2 ++
>>>>  arch/powerpc/net/bpf_jit_comp.c   |  7 ++++
>>>>  arch/powerpc/net/bpf_jit_comp64.c | 53 +++++++++++++++++++++----------
>>>>  3 files changed, 45 insertions(+), 17 deletions(-)
>>>>
>>>> diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
>>>> index 5d735bc5e6bd..fb548ae5d143 100644
>>>> --- a/arch/powerpc/net/bpf_jit.h
>>>> +++ b/arch/powerpc/net/bpf_jit.h
>>>> @@ -179,6 +179,8 @@ struct codegen_context {
>>>> u64 arena_vm_start;
>>>> u64 user_vm_start;
>>>> bool is_subprog;
>>>> + bool exception_boundary;
>>>> + bool exception_cb;
>>>> };
>>>> #define bpf_to_ppc(r) (ctx->b2p[r])
>>>> diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
>>>> index e3088cf089d1..26991940d36e 100644
>>>> --- a/arch/powerpc/net/bpf_jit_comp.c
>>>> +++ b/arch/powerpc/net/bpf_jit_comp.c
>>>> @@ -207,6 +207,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
>>>> cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena);
>>>> cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);
>>>> cgctx.is_subprog = bpf_is_subprog(fp);
>>>> + cgctx.exception_boundary = fp->aux->exception_boundary;
>>>> + cgctx.exception_cb = fp->aux->exception_cb;
>>>> /* Scouting faux-generate pass 0 */
>>>> if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
>>>> @@ -436,6 +438,11 @@ void bpf_jit_free(struct bpf_prog *fp)
>>>> bpf_prog_unlock_free(fp);
>>>> }
>>>> +bool bpf_jit_supports_exceptions(void)
>>>> +{
>>>> + return IS_ENABLED(CONFIG_PPC64);
>>>> +}
>>>> +
>>>> bool bpf_jit_supports_subprog_tailcalls(void)
>>>> {
>>>> return IS_ENABLED(CONFIG_PPC64);
>>>> diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
>>>> index ec58395f74f7..a6083dd9786c 100644
>>>> --- a/arch/powerpc/net/bpf_jit_comp64.c
>>>> +++ b/arch/powerpc/net/bpf_jit_comp64.c
>>>> @@ -89,7 +89,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
>>>> * - the bpf program uses its stack area
>>>> * The latter condition is deduced from the usage of BPF_REG_FP
>>>> */
>>>> - return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
>>>> + return ctx->seen & SEEN_FUNC ||
>>>> + bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)) ||
>>>> + ctx->exception_cb;
>>>> }
>>>> /*
>>>> @@ -190,23 +192,32 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
>>>> EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
>>>> }
>>>> - /*
>>>> - * Back up non-volatile regs -- BPF registers 6-10
>>>> - * If we haven't created our own stack frame, we save these
>>>> - * in the protected zone below the previous stack frame
>>>> - */
>>>> - for (i = BPF_REG_6; i <= BPF_REG_10; i++)
>>>> - if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
>>>> - EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
>>>> + if (!ctx->exception_cb) {
>>>> + /*
>>>> + * Back up non-volatile regs -- BPF registers 6-10
>>>> + * If we haven't created our own stack frame, we save these
>>>> + * in the protected zone below the previous stack frame
>>>> + */
>>>> + for (i = BPF_REG_6; i <= BPF_REG_10; i++)
>>>> + if (ctx->exception_boundary || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
>>>> + EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1,
>>>> + bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
>>>> - if (ctx->arena_vm_start)
>>>> - EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
>>>> + if (ctx->exception_boundary || ctx->arena_vm_start)
>>>> + EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
>>>> bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
>>>> - /* Setup frame pointer to point to the bpf stack area */
>>>> - if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
>>>> - EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
>>>> + /* Setup frame pointer to point to the bpf stack area */
>>>> + if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
>>>> + EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
>>>> STACK_FRAME_MIN_SIZE + ctx->stack_size));
>>>> + } else {
>>>> + /*
>>>> + * Exception callback receives Frame Pointer of main
>>>> + * program as third arg
>>>> + */
>>>> + EMIT(PPC_RAW_MR(_R1, _R5));
>>>> + }
>>>> if (ctx->arena_vm_start)
>>>> PPC_LI64(bpf_to_ppc(ARENA_VM_START), ctx->arena_vm_start);
>>>> @@ -218,17 +229,25 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
>>>> /* Restore NVRs */
>>>> for (i = BPF_REG_6; i <= BPF_REG_10; i++)
>>>> - if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
>>>> + if (ctx->exception_cb || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
>>>> EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
>>>> - if (ctx->arena_vm_start)
>>>> + if (ctx->exception_cb || ctx->arena_vm_start)
>>>> EMIT(PPC_RAW_LD(bpf_to_ppc(ARENA_VM_START), _R1,
>>>> bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
>>>>
>>>
>>>> + if (ctx->exception_cb) {
>>>> + /*
>>>> + * LR value from boundary-frame is received as second parameter
>>>> + * in exception callback.
>>>> + */
>>>> + EMIT(PPC_RAW_MTLR(_R4));
>>>> + }
>>>> +
>>>
>>> No. Both the second and third parameters of exception_cb() are stack
>>> pointers (or frame pointers, if you prefer that), not LR.
>>> The above hunk is wrong. It still worked because of the mtlr
>>> instruction below, which restores LR from the PPC_LR_STKOFF offset
>>> on the stack. Please drop the above hunk.
>> Okay, but from patch 3/6:
> My mistake, it's patch 4/6.
Please note that consume_fn() is not exception_cb(). To be precise, it
is bpf_stack_walker(), which helps with the stack walk.
You can find the exception_cb() call in the bpf_throw() kfunc as:

	ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0);
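
For reference, the callback pointer this ends up calling is declared in
include/linux/bpf.h (going by current upstream; please double-check
against your tree):

	u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64);

So the second and third arguments, r4 and r5 in the ppc64 calling
convention, are both stack/frame pointers of the boundary frame; no LR
value is passed to exception_cb() at all.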
>>
>> ip = frame[STACK_FRAME_LR_SAVE];
>> ...
>> if (ip && !consume_fn(cookie, ip, fp, fp))
>> we send cookie as the first parameter, LR as the second, and fp as
>> both the third and fourth.
>> The default callback expects 4 parameters, so we pass fp in both.
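
Right, but that is the signature of consume_fn(), i.e.
bpf_stack_walker(), not of exception_cb(). The walker only records the
sp/bp of the boundary frame, and those are what reach exception_cb().
Roughly, from kernel/bpf/helpers.c upstream (worth re-checking against
your tree):

	static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
	{
		struct bpf_throw_ctx *ctx = cookie;
		struct bpf_prog *prog;

		/* Stop once we walk past the last BPF frame */
		if (!is_bpf_text_address(ip))
			return !ctx->cnt;
		prog = bpf_prog_ksym_find(ip);
		ctx->cnt++;
		if (bpf_is_subprog(prog))
			return true;
		/* Record the boundary program's frame, not its LR */
		ctx->aux = prog->aux;
		ctx->sp = sp;
		ctx->bp = bp;
		return false;
	}

Note that ip (the LR value) is consumed here for the prog lookup and is
never forwarded to exception_cb().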
>>
>>>
>>>> /* Tear down our stack frame */
>>>> if (bpf_has_stack_frame(ctx)) {
>>>> EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
>>>> - if (ctx->seen & SEEN_FUNC) {
>>>> + if (ctx->seen & SEEN_FUNC || ctx->exception_cb) {
>>>> EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
>>>> EMIT(PPC_RAW_MTLR(_R0));
>>>> }
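
In other words, with SEEN_FUNC-or-exception_cb forcing the reload
above, the exception callback's return path already comes out roughly
as (sketching the emitted instructions, not quoting the patch):

	addi	r1, r1, BPF_PPC_STACKFRAME + stack_size	/* pop the boundary frame */
	ld	r0, PPC_LR_STKOFF(r1)			/* LR saved below that frame */
	mtlr	r0
	...
	blr

so LR is already correct without the extra mtlr from r4, which holds a
stack pointer rather than an LR value anyway.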
>>>
- Hari