Message-ID: <15c26b1f-b78d-45d0-b5d2-e8359ddf5bbc@linux.dev>
Date: Tue, 23 Dec 2025 10:32:09 -0800
From: Yonghong Song <yonghong.song@...ux.dev>
To: Xu Kuohai <xukuohai@...weicloud.com>, bpf@...r.kernel.org,
 linux-kernel@...r.kernel.org
Cc: Alexei Starovoitov <ast@...nel.org>,
 Daniel Borkmann <daniel@...earbox.net>, Andrii Nakryiko <andrii@...nel.org>,
 Martin KaFai Lau <martin.lau@...ux.dev>, Eduard Zingerman
 <eddyz87@...il.com>, Puranjay Mohan <puranjay@...nel.org>,
 Anton Protopopov <a.s.protopopov@...il.com>,
 Catalin Marinas <catalin.marinas@....com>, Will Deacon <will@...nel.org>
Subject: Re: [PATCH bpf-next v2] bpf: arm64: Fix panic due to missing BTI at
 indirect jump targets



On 12/23/25 12:54 AM, Xu Kuohai wrote:
> From: Xu Kuohai <xukuohai@...wei.com>
>
> When BTI is enabled, the indirect jump selftest triggers a BTI exception:
>
> Internal error: Oops - BTI: 0000000036000003 [#1]  SMP
> ...
> Call trace:
>   bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
>   bpf_prog_run_pin_on_cpu+0x140/0x464
>   bpf_prog_test_run_syscall+0x274/0x3ac
>   bpf_prog_test_run+0x224/0x2b0
>   __sys_bpf+0x4cc/0x5c8
>   __arm64_sys_bpf+0x7c/0x94
>   invoke_syscall+0x78/0x20c
>   el0_svc_common+0x11c/0x1c0
>   do_el0_svc+0x48/0x58
>   el0_svc+0x54/0x19c
>   el0t_64_sync_handler+0x84/0x12c
>   el0t_64_sync+0x198/0x19c
>
> This happens because no BTI instruction is generated by the JIT for
> indirect jump targets.
>
> Fix it by emitting a BTI instruction at every possible indirect jump
> target when BTI is enabled. The targets are identified by traversing
> all jump-table instruction arrays used by the BPF program, since
> indirect jump targets can only be read from such arrays.
>
> Fixes: f4a66cf1cb14 ("bpf: arm64: Add support for indirect jumps")
> Signed-off-by: Xu Kuohai <xukuohai@...wei.com>
> ---
> v2:
> - Exclude instruction arrays not used for indirect jumps (Anton Protopopov)
>
> v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com/
> ---
>   arch/arm64/net/bpf_jit_comp.c | 20 +++++++++++
>   include/linux/bpf.h           | 19 +++++++++++
>   kernel/bpf/bpf_insn_array.c   | 63 +++++++++++++++++++++++++++++++++++
>   kernel/bpf/verifier.c         |  6 ++++
>   4 files changed, 108 insertions(+)
>
> diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
> index 0c4d44bcfbf4..f08f0f9fa04e 100644
> --- a/arch/arm64/net/bpf_jit_comp.c
> +++ b/arch/arm64/net/bpf_jit_comp.c
> @@ -78,6 +78,7 @@ static const int bpf2a64[] = {
>   
>   struct jit_ctx {
>   	const struct bpf_prog *prog;
> +	unsigned long *indirect_targets;
>   	int idx;
>   	int epilogue_offset;
>   	int *offset;
> @@ -1199,6 +1200,11 @@ static int add_exception_handler(const struct bpf_insn *insn,
>   	return 0;
>   }
>   
> +static bool is_indirect_target(int insn_off, unsigned long *targets_bitmap)
> +{
> +	return targets_bitmap && test_bit(insn_off, targets_bitmap);
> +}
> +
>   /* JITs an eBPF instruction.
>    * Returns:
>    * 0  - successfully JITed an 8-byte eBPF instruction.
> @@ -1231,6 +1237,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
>   	int ret;
>   	bool sign_extend;
>   
> +	if (is_indirect_target(i, ctx->indirect_targets))
> +		emit_bti(A64_BTI_J, ctx);
> +
>   	switch (code) {
>   	/* dst = src */
>   	case BPF_ALU | BPF_MOV | BPF_X:
> @@ -2085,6 +2094,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
>   	memset(&ctx, 0, sizeof(ctx));
>   	ctx.prog = prog;
>   
> +	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && bpf_prog_has_jump_table(prog)) {
> +		ctx.indirect_targets = kvcalloc(BITS_TO_LONGS(prog->len), sizeof(unsigned long),
> +						GFP_KERNEL);
> +		if (ctx.indirect_targets == NULL) {
> +			prog = orig_prog;
> +			goto out_off;
> +		}
> +		bpf_prog_collect_indirect_targets(prog, ctx.indirect_targets);
> +	}
> +
>   	ctx.offset = kvcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
>   	if (ctx.offset == NULL) {
>   		prog = orig_prog;
> @@ -2248,6 +2267,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
>   			prog->aux->priv_stack_ptr = NULL;
>   		}
>   		kvfree(ctx.offset);
> +		kvfree(ctx.indirect_targets);
>   out_priv_stack:
>   		kfree(jit_data);
>   		prog->aux->jit_data = NULL;
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index da6a00dd313f..a3a89d4b4dae 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -3875,13 +3875,32 @@ void bpf_insn_array_release(struct bpf_map *map);
>   void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len);
>   void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);
>   
> +enum bpf_insn_array_type {
> +	BPF_INSN_ARRAY_VOID,

What is the purpose of BPF_INSN_ARRAY_VOID? Do we really need it?

> +	BPF_INSN_ARRAY_JUMP_TABLE,
> +};
> +
>   #ifdef CONFIG_BPF_SYSCALL
>   void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image);
> +void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap);
> +void bpf_prog_set_insn_array_type(struct bpf_map *map, int type);
> +bool bpf_prog_has_jump_table(const struct bpf_prog *prog);
>   #else
>   static inline void
>   bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
>   {
>   }
> +static inline void
> +bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
> +{
> +}
> +static inline void bpf_prog_set_insn_array_type(struct bpf_map *map, int type)
> +{
> +}
> +static inline bool bpf_prog_has_jump_table(const struct bpf_prog *prog)
> +{
> +	return false;
> +}
>   #endif
>   
>   static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
> diff --git a/kernel/bpf/bpf_insn_array.c b/kernel/bpf/bpf_insn_array.c
> index c96630cb75bf..fbffc865feab 100644
> --- a/kernel/bpf/bpf_insn_array.c
> +++ b/kernel/bpf/bpf_insn_array.c
> @@ -5,6 +5,7 @@
>   
>   struct bpf_insn_array {
>   	struct bpf_map map;
> +	int type;
>   	atomic_t used;
>   	long *ips;
>   	DECLARE_FLEX_ARRAY(struct bpf_insn_array_value, values);
> @@ -159,6 +160,17 @@ static bool is_insn_array(const struct bpf_map *map)
>   	return map->map_type == BPF_MAP_TYPE_INSN_ARRAY;
>   }
>   
> +static bool is_jump_table(const struct bpf_map *map)
> +{
> +	struct bpf_insn_array *insn_array;
> +
> +	if (!is_insn_array(map))
> +		return false;
> +
> +	insn_array = cast_insn_array(map);
> +	return insn_array->type == BPF_INSN_ARRAY_JUMP_TABLE;
> +}
> +
>   static inline bool valid_offsets(const struct bpf_insn_array *insn_array,
>   				 const struct bpf_prog *prog)
>   {
> @@ -302,3 +314,54 @@ void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
>   		}
>   	}
>   }
> +
> +bool bpf_prog_has_jump_table(const struct bpf_prog *prog)
> +{
> +	int i;
> +
> +	for (i = 0; i < prog->aux->used_map_cnt; i++) {
> +		if (is_jump_table(prog->aux->used_maps[i]))
> +			return true;
> +	}
> +	return false;
> +}
> +
> +/*
> + * This function collects possible indirect jump targets in a BPF program. Since indirect jump
> + * targets can only be read from instruction arrays used as jump tables, it traverses all jump
> + * tables used by @prog. For each instruction found in the jump tables, it sets the corresponding
> + * bit in @bitmap.
> + */
> +void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
> +{
> +	struct bpf_insn_array *insn_array;
> +	struct bpf_map *map;
> +	u32 xlated_off;
> +	int i, j;
> +
> +	for (i = 0; i < prog->aux->used_map_cnt; i++) {
> +		map = prog->aux->used_maps[i];
> +		if (!is_jump_table(map))
> +			continue;
> +
> +		insn_array = cast_insn_array(map);
> +		for (j = 0; j < map->max_entries; j++) {
> +			xlated_off = insn_array->values[j].xlated_off;
> +			if (xlated_off == INSN_DELETED)
> +				continue;
> +			if (xlated_off < prog->aux->subprog_start)
> +				continue;
> +			xlated_off -= prog->aux->subprog_start;
> +			if (xlated_off >= prog->len)
> +				continue;

The above code is duplicated from bpf_prog_update_insn_ptrs().
Maybe we can have a helper for the above?
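
Maybe something like this (completely untested sketch; the helper name
insn_array_prog_off is just a suggestion):

static int insn_array_prog_off(const struct bpf_prog *prog, u32 xlated_off)
{
	/* entry was deleted by the verifier */
	if (xlated_off == INSN_DELETED)
		return -1;
	/* entry does not fall inside @prog */
	if (xlated_off < prog->aux->subprog_start)
		return -1;
	xlated_off -= prog->aux->subprog_start;
	if (xlated_off >= prog->len)
		return -1;
	return xlated_off;
}

Then both bpf_prog_update_insn_ptrs() and the loop above reduce to

	off = insn_array_prog_off(prog, insn_array->values[j].xlated_off);
	if (off < 0)
		continue;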

> +			__set_bit(xlated_off, bitmap);
> +		}
> +	}
> +}
> +
> +void bpf_prog_set_insn_array_type(struct bpf_map *map, int type)
> +{
> +	struct bpf_insn_array *insn_array = cast_insn_array(map);
> +
> +	insn_array->type = type;
> +}
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index d6b8a77fbe3b..ee6f4ddfbb79 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -20288,6 +20288,12 @@ static int check_indirect_jump(struct bpf_verifier_env *env, struct bpf_insn *in
>   		return -EINVAL;
>   	}
>   
> +	/*
> +	 * Explicitly mark this map as a jump table such that it can be
> +	 * distinguished later from other instruction arrays
> +	 */
> +	bpf_prog_set_insn_array_type(map, BPF_INSN_ARRAY_JUMP_TABLE);

I think we do not need this for now. If a new indirect_jump type is introduced,
the verifier/jit can be adjusted at that time if necessary.
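
If I read it correctly, the jit side could then simply treat every insn
array as a potential jump table (untested; at worst we emit a few
superfluous BTI landing pads, which behave as NOPs when not reached
via an indirect branch):

static bool is_jump_table(const struct bpf_map *map)
{
	/* no type marking needed while jump tables are the only
	 * consumer of insn arrays for indirect jumps
	 */
	return is_insn_array(map);
}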

> +
>   	for (i = 0; i < n - 1; i++) {
>   		other_branch = push_stack(env, env->gotox_tmp_buf->items[i],
>   					  env->insn_idx, env->cur_state->speculative);

