Message-ID: <74bd0822-c8c1-47cc-b816-78036abff8ee@huaweicloud.com>
Date: Tue, 22 Jul 2025 11:44:26 +0800
From: Xu Kuohai <xukuohai@...weicloud.com>
To: Sami Tolvanen <samitolvanen@...gle.com>, bpf@...r.kernel.org,
Puranjay Mohan <puranjay@...nel.org>, Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>
Cc: Catalin Marinas <catalin.marinas@....com>, Will Deacon <will@...nel.org>,
Andrii Nakryiko <andrii@...nel.org>, Mark Rutland <mark.rutland@....com>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
Maxwell Bland <mbland@...orola.com>, Puranjay Mohan <puranjay12@...il.com>,
Dao Huang <huangdao1@...o.com>
Subject: Re: [PATCH bpf-next v12 3/3] arm64/cfi,bpf: Support kCFI + BPF on
arm64

On 7/22/2025 4:20 AM, Sami Tolvanen wrote:
> From: Puranjay Mohan <puranjay12@...il.com>
>
> Currently, bpf_dispatcher_*_func() is marked with `__nocfi` therefore
> calling BPF programs from this interface doesn't cause CFI warnings.
>
> When BPF programs are called directly from C: from BPF helpers or
> struct_ops, CFI warnings are generated.
>
> Implement proper CFI prologues for the BPF programs and callbacks and
> drop __nocfi for arm64. Fix the trampoline generation code to emit kCFI
> prologue when a struct_ops trampoline is being prepared.
>
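For readers following along: as I understand it (this isn't spelled out
in the patch itself), the resulting image layout is roughly

	ro_image:                    kCFI type hash (one 32-bit data word)
	ro_image + cfi_get_offset(): real prologue; prog->bpf_func points here

which is why bpf_int_jit_compile() below adjusts bpf_func and jited_len
by cfi_get_offset().
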
> Signed-off-by: Puranjay Mohan <puranjay12@...il.com>
> Co-developed-by: Maxwell Bland <mbland@...orola.com>
> Signed-off-by: Maxwell Bland <mbland@...orola.com>
> Co-developed-by: Sami Tolvanen <samitolvanen@...gle.com>
> Signed-off-by: Sami Tolvanen <samitolvanen@...gle.com>
> Tested-by: Dao Huang <huangdao1@...o.com>
> Acked-by: Will Deacon <will@...nel.org>
> ---
> arch/arm64/include/asm/cfi.h | 7 +++++++
> arch/arm64/net/bpf_jit_comp.c | 22 +++++++++++++++++++---
> 2 files changed, 26 insertions(+), 3 deletions(-)
> create mode 100644 arch/arm64/include/asm/cfi.h
>
> diff --git a/arch/arm64/include/asm/cfi.h b/arch/arm64/include/asm/cfi.h
> new file mode 100644
> index 000000000000..ab90f0351b7a
> --- /dev/null
> +++ b/arch/arm64/include/asm/cfi.h
> @@ -0,0 +1,7 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _ASM_ARM64_CFI_H
> +#define _ASM_ARM64_CFI_H
> +
> +#define __bpfcall
> +
> +#endif /* _ASM_ARM64_CFI_H */
> diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
> index 89b1b8c248c6..f4a98c1a1583 100644
> --- a/arch/arm64/net/bpf_jit_comp.c
> +++ b/arch/arm64/net/bpf_jit_comp.c
> @@ -10,6 +10,7 @@
> #include <linux/arm-smccc.h>
> #include <linux/bitfield.h>
> #include <linux/bpf.h>
> +#include <linux/cfi.h>
> #include <linux/filter.h>
> #include <linux/memory.h>
> #include <linux/printk.h>
> @@ -166,6 +167,12 @@ static inline void emit_bti(u32 insn, struct jit_ctx *ctx)
> emit(insn, ctx);
> }
>
> +static inline void emit_kcfi(u32 hash, struct jit_ctx *ctx)
> +{
> + if (IS_ENABLED(CONFIG_CFI_CLANG))
> + emit(hash, ctx);
I guess this won't work on big-endian CPUs: arm64 instructions are
always stored little-endian, but data is stored in native byte order,
so on a big-endian kernel the hash word written by emit() would be
byte-swapped relative to the data load the kCFI check performs.
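
Something like the following might be needed (untested sketch; it
assumes emit() stores its argument as a little-endian instruction word,
with swab32() from <linux/swab.h>):

static inline void emit_kcfi(u32 hash, struct jit_ctx *ctx)
{
	if (!IS_ENABLED(CONFIG_CFI_CLANG))
		return;

	/*
	 * emit() writes the value as a little-endian instruction word,
	 * but the kCFI check loads the hash back with a data load, which
	 * is big-endian on CONFIG_CPU_BIG_ENDIAN kernels. Swap the bytes
	 * so the loaded value matches the expected hash.
	 */
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		hash = swab32(hash);

	emit(hash, ctx);
}
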
> +}
> +
> /*
> * Kernel addresses in the vmalloc space use at most 48 bits, and the
> * remaining bits are guaranteed to be 0x1. So we can compose the address
> @@ -476,7 +483,6 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
> const bool is_main_prog = !bpf_is_subprog(prog);
> const u8 fp = bpf2a64[BPF_REG_FP];
> const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
> - const int idx0 = ctx->idx;
> int cur_offset;
>
> /*
> @@ -502,6 +508,9 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
> *
> */
>
> + emit_kcfi(is_main_prog ? cfi_bpf_hash : cfi_bpf_subprog_hash, ctx);
> + const int idx0 = ctx->idx;
Could you move the idx0 definition back to its original position, with
the other declarations, to match the coding style of the rest of the
file?
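
For example (untested sketch, dropping the const; the index still needs
to be captured after the kCFI hash so the cur_offset check against
PROLOGUE_OFFSET below is unaffected):

	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
	int idx0;
	int cur_offset;
	...
	emit_kcfi(is_main_prog ? cfi_bpf_hash : cfi_bpf_subprog_hash, ctx);
	idx0 = ctx->idx;
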
> +
> /* bpf function may be invoked by 3 instruction types:
> * 1. bl, attached via freplace to bpf prog via short jump
> * 2. br, attached via freplace to bpf prog via long jump
> @@ -2055,9 +2064,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
> jit_data->ro_header = ro_header;
> }
>
> - prog->bpf_func = (void *)ctx.ro_image;
> + prog->bpf_func = (void *)ctx.ro_image + cfi_get_offset();
> prog->jited = 1;
> - prog->jited_len = prog_size;
> + prog->jited_len = prog_size - cfi_get_offset();
>
> if (!prog->is_func || extra_pass) {
> int i;
> @@ -2426,6 +2435,12 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
> /* return address locates above FP */
> retaddr_off = stack_size + 8;
>
> + if (flags & BPF_TRAMP_F_INDIRECT) {
> + /*
> + * Indirect call for bpf_struct_ops
> + */
> + emit_kcfi(cfi_get_func_hash(func_addr), ctx);
> + }
> /* bpf trampoline may be invoked by 3 instruction types:
> * 1. bl, attached to bpf prog or kernel function via short jump
> * 2. br, attached to bpf prog or kernel function via long jump
> @@ -2942,6 +2957,7 @@ void bpf_jit_free(struct bpf_prog *prog)
> sizeof(jit_data->header->size));
> kfree(jit_data);
> }
> + prog->bpf_func -= cfi_get_offset();
> hdr = bpf_jit_binary_pack_hdr(prog);
> bpf_jit_binary_pack_free(hdr, NULL);
> WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));