lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <ef0c9d64-1742-4ae9-af4e-868287328c06@linux.ibm.com>
Date: Fri, 23 Jan 2026 23:43:35 +0530
From: Hari Bathini <hbathini@...ux.ibm.com>
To: adubey <adubey@...p.linux.ibm.com>
Cc: adubey@...ux.ibm.com, bpf@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
        linux-kselftest@...r.kernel.org, linux-kernel@...r.kernel.org,
        sachinpb@...ux.ibm.com, venkat88@...ux.ibm.com, andrii@...nel.org,
        eddyz87@...il.com, mykolal@...com, ast@...nel.org,
        daniel@...earbox.net, martin.lau@...ux.dev, song@...nel.org,
        yonghong.song@...ux.dev, john.fastabend@...il.com, kpsingh@...nel.org,
        sdf@...ichev.me, haoluo@...gle.com, jolsa@...nel.org,
        christophe.leroy@...roup.eu, naveen@...nel.org, maddy@...ux.ibm.com,
        mpe@...erman.id.au, npiggin@...il.com, memxor@...il.com,
        iii@...ux.ibm.com, shuah@...nel.org
Subject: Re: [PATCH v4 5/6] powerpc64/bpf: Support exceptions



On 23/01/26 7:23 pm, adubey wrote:
> On 2026-01-23 18:24, Hari Bathini wrote:
>> On 23/01/26 2:48 am, adubey@...ux.ibm.com wrote:
>>> From: Abhishek Dubey <adubey@...ux.ibm.com>
>>>
>>> The modified prologue/epilogue generation code now
>>> enables exception-callback to use the stack frame of
>>> the program marked as exception boundary, where callee
>>> saved registers are stored.
>>>
>>> As per ppc64 ABIv2 documentation[1], r14-r31 are callee
>>> saved registers. BPF programs on ppc64 already save the
>>> r26-r31 registers. Saving the remaining set of callee
>>> saved registers (r14-r25) is handled in the next patch.
>>>
>>> [1] https://ftp.rtems.org/pub/rtems/people/sebh/ 
>>> ABI64BitOpenPOWERv1.1_16July2015_pub.pdf
>>>
>>> Signed-off-by: Abhishek Dubey <adubey@...ux.ibm.com>
>>> ---
>>>   arch/powerpc/net/bpf_jit.h        |  2 ++
>>>   arch/powerpc/net/bpf_jit_comp.c   |  7 ++++
>>>   arch/powerpc/net/bpf_jit_comp64.c | 58 +++++++++++++++++++++----------
>>>   3 files changed, 48 insertions(+), 19 deletions(-)
>>>
>>> diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
>>> index 56f56fdd4969..82bbf63f0e57 100644
>>> --- a/arch/powerpc/net/bpf_jit.h
>>> +++ b/arch/powerpc/net/bpf_jit.h
>>> @@ -179,6 +179,8 @@ struct codegen_context {
>>>       u64 arena_vm_start;
>>>       u64 user_vm_start;
>>>       bool is_subprog;
>>> +    bool exception_boundary;
>>> +    bool exception_cb;
>>>   };
>>>     #define bpf_to_ppc(r)    (ctx->b2p[r])
>>> diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/ 
>>> bpf_jit_comp.c
>>> index 1a305f0fed27..2607ea0bedef 100644
>>> --- a/arch/powerpc/net/bpf_jit_comp.c
>>> +++ b/arch/powerpc/net/bpf_jit_comp.c
>>> @@ -207,6 +207,8 @@ struct bpf_prog *bpf_int_jit_compile(struct 
>>> bpf_prog *fp)
>>>       cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux- 
>>> >arena);
>>>       cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);
>>>       cgctx.is_subprog = bpf_is_subprog(fp);
>>> +    cgctx.exception_boundary = fp->aux->exception_boundary;
>>> +    cgctx.exception_cb = fp->aux->exception_cb;
>>>         /* Scouting faux-generate pass 0 */
>>>       if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
>>> @@ -436,6 +438,11 @@ void bpf_jit_free(struct bpf_prog *fp)
>>>       bpf_prog_unlock_free(fp);
>>>   }
>>>   +bool bpf_jit_supports_exceptions(void)
>>> +{
>>> +    return IS_ENABLED(CONFIG_PPC64);
>>> +}
>>> +
>>>   bool bpf_jit_supports_subprog_tailcalls(void)
>>>   {
>>>       return IS_ENABLED(CONFIG_PPC64);
>>> diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/ 
>>> bpf_jit_comp64.c
>>> index c25ba1ad587a..d7cd8ab6559c 100644
>>> --- a/arch/powerpc/net/bpf_jit_comp64.c
>>> +++ b/arch/powerpc/net/bpf_jit_comp64.c
>>> @@ -89,7 +89,9 @@ static inline bool bpf_has_stack_frame(struct 
>>> codegen_context *ctx)
>>>        * - the bpf program uses its stack area
>>>        * The latter condition is deduced from the usage of BPF_REG_FP
>>>        */
>>> -    return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, 
>>> bpf_to_ppc(BPF_REG_FP));
>>> +    return ctx->seen & SEEN_FUNC ||
>>> +           bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)) ||
>>> +           ctx->exception_cb;
>>>   }
>>>     /*
>>
>>
>>> @@ -161,8 +163,13 @@ void bpf_jit_build_prologue(u32 *image, struct 
>>> codegen_context *ctx)
>>
>>
>>
>>>           EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
>>>           /* this goes in the redzone */
>>>           EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, - 
>>> (BPF_PPC_TAILCALL)));
>>> -    } else {
>>> +    } else if (!ctx->exception_cb) {
>>>           /*
>>> +         * Tailcall jitting for non exception_cb progs only.
>>> +         * exception_cb won't require tail_call_info to be setup.
>>> +         *
>>> +         * tail_call_info interpretation logic:
>>> +         *
>>>            * if tail_call_info < MAX_TAIL_CALL_CNT
>>>            *      main prog calling first subprog -> copy reference
>>>            * else
>>> @@ -177,8 +184,12 @@ void bpf_jit_build_prologue(u32 *image, struct 
>>> codegen_context *ctx)
>>>           EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, - 
>>> (BPF_PPC_TAILCALL)));
>>>       }
>>>   -    if (bpf_has_stack_frame(ctx)) {
>>> +    if (bpf_has_stack_frame(ctx) && !ctx->exception_cb) {
>>>           /*
>>> +         * exception_cb uses boundary frame after stack walk.
>>> +         * It can simply use redzone, this optimization reduces
>>> +         * stack walk loop by one level.
>>> +         *
>>>            * We need a stack frame, but we don't necessarily need to
>>>            * save/restore LR unless we call other functions
>>>            */
>>> @@ -190,23 +201,32 @@ void bpf_jit_build_prologue(u32 *image, struct 
>>> codegen_context *ctx)
>>>           EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx- 
>>> >stack_size)));
>>>       }
>>>   -    /*
>>> -     * Back up non-volatile regs -- BPF registers 6-10
>>> -     * If we haven't created our own stack frame, we save these
>>> -     * in the protected zone below the previous stack frame
>>> -     */
>>> -    for (i = BPF_REG_6; i <= BPF_REG_10; i++)
>>> -        if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
>>> -            EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, 
>>> bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
>>> +    if (!ctx->exception_cb) {
>>> +        /*
>>> +         * Back up non-volatile regs -- BPF registers 6-10
>>> +         * If we haven't created our own stack frame, we save these
>>> +         * in the protected zone below the previous stack frame
>>> +         */
>>> +        for (i = BPF_REG_6; i <= BPF_REG_10; i++)
>>> +            if (ctx->exception_boundary || bpf_is_seen_register(ctx, 
>>> bpf_to_ppc(i)))
>>> +                EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1,
>>> +                    bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
>>>   -    if (ctx->arena_vm_start)
>>> -        EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
>>> +        if (ctx->exception_boundary || ctx->arena_vm_start)
>>> +            EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
>>>                    bpf_jit_stack_offsetof(ctx, 
>>> bpf_to_ppc(ARENA_VM_START))));
>>>   -    /* Setup frame pointer to point to the bpf stack area */
>>> -    if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
>>> -        EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
>>> +        /* Setup frame pointer to point to the bpf stack area */
>>> +        if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
>>> +            EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
>>>                   STACK_FRAME_MIN_SIZE + ctx->stack_size));
>>> +    } else {
>>> +        /*
>>> +         * Exception callback receives Frame Pointer of main
>>> +         * program as third arg
>>> +         */
>>> +        EMIT(PPC_RAW_MR(_R1, _R5));
>>> +    }
>>>         if (ctx->arena_vm_start)
>>>           PPC_LI64(bpf_to_ppc(ARENA_VM_START), ctx->arena_vm_start);
>>
>> For all practical purposes, the only thing that matters for the prologue
>> of the exception_cb subprog seems to be to set r1 and move on. The below
>> snippet before tailcall setup in the prologue should leave the rest of
>> the prologue code unchanged?
>>
>> if (ctx->exception_cb) {
>>     /*
>>      * Exception callback receives Frame Pointer of main
>>
>>      * program as third arg
>>
>>      */
>>     EMIT(PPC_RAW_MR(_R1, _R5));
>>     return;
>> }
> This approach could work, but it may be fragile. If future changes to the
> callback logic start accessing fields from the active boundary frame, this
> could lead to incorrect memory access or corruption. Other archs have 
> kept it.
> Shall we keep it?

True. In fact, exception_cb doesn't seem to be restricted from using
the BPF stack or arena...

So, the "Setup frame pointer to point to the bpf stack area" part
may also have to come out of that !exception_cb condition..

- Hari

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ