Message-Id: <20240324211518.93892-2-puranjay12@gmail.com>
Date: Sun, 24 Mar 2024 21:15:18 +0000
From: Puranjay Mohan <puranjay12@...il.com>
To: Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
Martin KaFai Lau <martin.lau@...ux.dev>,
Eduard Zingerman <eddyz87@...il.com>,
Song Liu <song@...nel.org>,
Yonghong Song <yonghong.song@...ux.dev>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>,
Stanislav Fomichev <sdf@...gle.com>,
Hao Luo <haoluo@...gle.com>,
Jiri Olsa <jolsa@...nel.org>,
Zi Shen Lim <zlim.lnx@...il.com>,
Mark Rutland <mark.rutland@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Mark Brown <broonie@...nel.org>,
linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org (open list),
bpf@...r.kernel.org (open list:BPF [GENERAL] (Safe Dynamic Programs and Tools)),
Josh Poimboeuf <jpoimboe@...nel.org>
Cc: puranjay12@...il.com
Subject: [PATCH bpf-next v2 1/1] arm64/cfi,bpf: Support kCFI + BPF on arm64
Currently, bpf_dispatcher_*_func() is marked with `__nocfi`, therefore
calling BPF programs through this interface doesn't cause CFI warnings.
However, when BPF programs are called directly from C, i.e. from BPF
helpers or from struct_ops, CFI warnings are generated.

Implement proper CFI prologues for the BPF programs and callbacks and
drop __nocfi for arm64. Fix the trampoline generation code to emit a
kCFI prologue when a struct_ops trampoline is being prepared.
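With this change the JITed image starts with the kCFI type hash and
prog->bpf_func points just past it. Roughly (a sketch based on the
hunks below; cfi_get_offset() is 4 on arm64):

  ctx->ro_image ---> +---------------------------+
                     | kCFI type hash (u32)      |
  prog->bpf_func --> +---------------------------+
                     | BTI / PACIASP (if used)   |
                     | regular BPF prologue      |
                     | ...                       |

jited_len is reduced by the same offset so that it still describes the
code starting at prog->bpf_func.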
Signed-off-by: Puranjay Mohan <puranjay12@...il.com>
---
arch/arm64/include/asm/cfi.h | 23 ++++++++++++++
arch/arm64/kernel/alternative.c | 54 +++++++++++++++++++++++++++++++++
arch/arm64/net/bpf_jit_comp.c | 28 +++++++++++++----
3 files changed, 99 insertions(+), 6 deletions(-)
create mode 100644 arch/arm64/include/asm/cfi.h
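As background for reviewers: a kCFI-instrumented indirect call site on
arm64 loads the u32 type hash stored immediately before the callee's
entry point and compares it against the expected hash before branching.
The check sequence looks roughly like this (hash value, register
numbers and the BRK immediate are illustrative; the exact encoding is
chosen by the compiler):

	ldur	w16, [x0, #-4]		// load hash emitted in front of the target
	movz	w17, #0xbeef		// expected type hash, low 16 bits
	movk	w17, #0xdead, lsl #16	// expected type hash, high 16 bits
	cmp	w16, w17
	b.eq	1f
	brk	#0x8220			// CFI failure; ESR imm identifies the regs
1:	blr	x0			// hash matched, make the call

This is why the JIT below must place the hash at entry - 4, and why
prog->bpf_func and prog->jited_len are adjusted by cfi_get_offset().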
diff --git a/arch/arm64/include/asm/cfi.h b/arch/arm64/include/asm/cfi.h
new file mode 100644
index 000000000000..670e191f8628
--- /dev/null
+++ b/arch/arm64/include/asm/cfi.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_CFI_H
+#define _ASM_ARM64_CFI_H
+
+#ifdef CONFIG_CFI_CLANG
+#define __bpfcall
+static inline int cfi_get_offset(void)
+{
+ return 4;
+}
+#define cfi_get_offset cfi_get_offset
+extern u32 cfi_bpf_hash;
+extern u32 cfi_bpf_subprog_hash;
+extern u32 cfi_get_func_hash(void *func);
+#else
+#define cfi_bpf_hash 0U
+#define cfi_bpf_subprog_hash 0U
+static inline u32 cfi_get_func_hash(void *func)
+{
+ return 0;
+}
+#endif /* CONFIG_CFI_CLANG */
+#endif /* _ASM_ARM64_CFI_H */
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 8ff6610af496..1715da7df137 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -13,6 +13,7 @@
#include <linux/elf.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
+#include <asm/cfi.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/module.h>
@@ -298,3 +299,56 @@ noinstr void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
}
EXPORT_SYMBOL(alt_cb_patch_nops);
+
+#ifdef CONFIG_CFI_CLANG
+struct bpf_insn;
+
+/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
+extern unsigned int __bpf_prog_runX(const void *ctx,
+ const struct bpf_insn *insn);
+
+/*
+ * Force a reference to the external symbol so the compiler generates
+ * __kcfi_typeid.
+ */
+__ADDRESSABLE(__bpf_prog_runX);
+
+/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
+asm (
+" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
+" .type cfi_bpf_hash,@object \n"
+" .globl cfi_bpf_hash \n"
+" .p2align 2, 0x0 \n"
+"cfi_bpf_hash: \n"
+" .word __kcfi_typeid___bpf_prog_runX \n"
+" .size cfi_bpf_hash, 4 \n"
+" .popsection \n"
+);
+
+/* Must match bpf_callback_t */
+extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
+
+__ADDRESSABLE(__bpf_callback_fn);
+
+/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
+asm (
+" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
+" .type cfi_bpf_subprog_hash,@object \n"
+" .globl cfi_bpf_subprog_hash \n"
+" .p2align 2, 0x0 \n"
+"cfi_bpf_subprog_hash: \n"
+" .word __kcfi_typeid___bpf_callback_fn \n"
+" .size cfi_bpf_subprog_hash, 4 \n"
+" .popsection \n"
+);
+
+u32 cfi_get_func_hash(void *func)
+{
+ u32 hash;
+
+ if (get_kernel_nofault(hash, func - cfi_get_offset()))
+ return 0;
+
+ return hash;
+}
+#endif
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index bc16eb694657..2372812bb47c 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -17,6 +17,7 @@
#include <asm/asm-extable.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
+#include <asm/cfi.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/patching.h>
@@ -158,6 +159,12 @@ static inline void emit_bti(u32 insn, struct jit_ctx *ctx)
emit(insn, ctx);
}
+static inline void emit_kcfi(u32 hash, struct jit_ctx *ctx)
+{
+ if (IS_ENABLED(CONFIG_CFI_CLANG))
+ emit(hash, ctx);
+}
+
/*
* Kernel addresses in the vmalloc space use at most 48 bits, and the
* remaining bits are guaranteed to be 0x1. So we can compose the address
@@ -295,7 +302,7 @@ static bool is_lsi_offset(int offset, int scale)
#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
- bool is_exception_cb)
+ bool is_exception_cb, bool is_subprog)
{
const struct bpf_prog *prog = ctx->prog;
const bool is_main_prog = !bpf_is_subprog(prog);
@@ -306,7 +313,6 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 tcc = bpf2a64[TCALL_CNT];
const u8 fpb = bpf2a64[FP_BOTTOM];
- const int idx0 = ctx->idx;
int cur_offset;
/*
@@ -332,6 +338,8 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
*
*/
+ emit_kcfi(is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash, ctx);
+ const int idx0 = ctx->idx;
/* bpf function may be invoked by 3 instruction types:
* 1. bl, attached via freplace to bpf prog via short jump
* 2. br, attached via freplace to bpf prog via long jump
@@ -1648,7 +1656,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
* BPF line info needs ctx->offset[i] to be the offset of
* instruction[i] in jited image, so build prologue first.
*/
- if (build_prologue(&ctx, was_classic, prog->aux->exception_cb)) {
+ if (build_prologue(&ctx, was_classic, prog->aux->exception_cb,
+ bpf_is_subprog(prog))) {
prog = orig_prog;
goto out_off;
}
@@ -1696,7 +1705,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
ctx.idx = 0;
ctx.exentry_idx = 0;
- build_prologue(&ctx, was_classic, prog->aux->exception_cb);
+ build_prologue(&ctx, was_classic, prog->aux->exception_cb,
+ bpf_is_subprog(prog));
if (build_body(&ctx, extra_pass)) {
prog = orig_prog;
@@ -1745,9 +1755,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
jit_data->ro_header = ro_header;
}
- prog->bpf_func = (void *)ctx.ro_image;
+ prog->bpf_func = (void *)ctx.ro_image + cfi_get_offset();
prog->jited = 1;
- prog->jited_len = prog_size;
+ prog->jited_len = prog_size - cfi_get_offset();
if (!prog->is_func || extra_pass) {
int i;
@@ -2011,6 +2021,12 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
/* return address locates above FP */
retaddr_off = stack_size + 8;
+ if (flags & BPF_TRAMP_F_INDIRECT) {
+ /*
+ * Indirect call for bpf_struct_ops
+ */
+ emit_kcfi(cfi_get_func_hash(func_addr), ctx);
+ }
/* bpf trampoline may be invoked by 3 instruction types:
* 1. bl, attached to bpf prog or kernel function via short jump
* 2. br, attached to bpf prog or kernel function via long jump
--
2.40.1