Message-ID: <CAEf4BzZJEgVKVZsBvHZuhQWBTN6G7zY9mQH8o5xoyrDEUNG2DA@mail.gmail.com>
Date: Mon, 18 Nov 2019 10:11:57 -0800
From: Andrii Nakryiko <andrii.nakryiko@...il.com>
To: Daniel Borkmann <daniel@...earbox.net>
Cc: Alexei Starovoitov <ast@...nel.org>,
john fastabend <john.fastabend@...il.com>,
Networking <netdev@...r.kernel.org>, bpf <bpf@...r.kernel.org>
Subject: Re: [PATCH rfc bpf-next 8/8] bpf: constant map key tracking for prog
array pokes
On Thu, Nov 14, 2019 at 5:04 PM Daniel Borkmann <daniel@...earbox.net> wrote:
>
> Add tracking of constant keys into tail call maps. The signature of
> bpf_tail_call_proto is that arg1 is ctx, arg2 is the map pointer and
> arg3 is an index key. The direct call approach for tail calls can be
> enabled if the verifier asserted that for all branches leading to the
> tail call helper invocation, the map pointer and index key were both
> constant and the same. Tracking of map pointers is already done from
> prior work via c93552c443eb ("bpf: properly enforce index mask to
> prevent out-of-bounds speculation") and 09772d92cd5a ("bpf: avoid
> retpoline for lookup/update/delete calls on maps"). Given the tail
> call map index key is not on the stack but directly in the register,
> we can add a similar tracking approach and later, in fixup_bpf_calls(),
> add a poke descriptor to the prog's poke_tab with the relevant
> information for the JITing phase. We internally reuse insn->imm for
> the rewritten BPF_JMP | BPF_TAIL_CALL instruction in order to point
> into the prog's poke_tab, and keep insn->imm == 0 as an indicator that
> the current indirect tail call emission must be used.
>
> Signed-off-by: Daniel Borkmann <daniel@...earbox.net>
> ---
> include/linux/bpf_verifier.h | 1 +
> kernel/bpf/verifier.c | 98 ++++++++++++++++++++++++++++++++++++
> 2 files changed, 99 insertions(+)
>
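For context, here is a minimal sketch (not taken from the patch) of the
kind of program this optimization targets; the map/program names below
are made up, and it assumes a libbpf recent enough for BTF-defined maps:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct {
          __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
          __uint(max_entries, 4);
          __type(key, __u32);
          __type(value, __u32);
  } jmp_table SEC(".maps");

  SEC("xdp")
  int dispatch(struct xdp_md *ctx)
  {
          /* map pointer and index are constant on every path, so the
           * verifier can record them and the JIT can later patch in a
           * direct jump instead of the indirect tail call
           */
          bpf_tail_call(ctx, &jmp_table, 1);

          /* only reached if slot 1 of jmp_table is empty */
          return XDP_PASS;
  }

  char _license[] SEC("license") = "GPL";
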
> diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
> index cdd08bf0ec06..f494f0c9ac13 100644
> --- a/include/linux/bpf_verifier.h
> +++ b/include/linux/bpf_verifier.h
> @@ -301,6 +301,7 @@ struct bpf_insn_aux_data {
> u32 map_off; /* offset from value base address */
> };
> };
> + u64 key_state; /* constant key tracking for maps */
> int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
> int sanitize_stack_off; /* stack slot to be cleared */
> bool seen; /* this insn was processed by the verifier */
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index e9dc95a18d44..48d5c9030d60 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -171,6 +171,9 @@ struct bpf_verifier_stack_elem {
> #define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192
> #define BPF_COMPLEXITY_LIMIT_STATES 64
>
> +#define BPF_MAP_KEY_POISON (1ULL << 63)
> +#define BPF_MAP_KEY_SEEN (1ULL << 62)
> +
> #define BPF_MAP_PTR_UNPRIV 1UL
> #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
> POISON_POINTER_DELTA))
> @@ -195,6 +198,29 @@ static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
> (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
> }
>
> +static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
> +{
> + return aux->key_state & BPF_MAP_KEY_POISON;
> +}
> +
> +static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
> +{
> + return !(aux->key_state & BPF_MAP_KEY_SEEN);
> +}
> +
> +static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
> +{
> + return aux->key_state & ~BPF_MAP_KEY_SEEN;
> +}
This works out for the current logic you've implemented, but it's a bit
misleading that bpf_map_key_immediate is also going to return the
POISON bit. Was this intentional?
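If the intent is to return only the immediate key value, one option
(purely a sketch) would be to mask out both control bits here, e.g.:

  static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
  {
          /* strip both control bits so callers only see the key */
          return aux->key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
  }
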
> +
> +static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
> +{
> + bool poisoned = bpf_map_key_poisoned(aux);
> +
> + aux->key_state = state | BPF_MAP_KEY_SEEN |
> + (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
> +}
> +
> struct bpf_call_arg_meta {
> struct bpf_map *map_ptr;
> bool raw_mode;
> @@ -4088,6 +4114,37 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
> return 0;
> }
>
> +static int
> +record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
> + int func_id, int insn_idx)
> +{
> + struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
> + struct bpf_reg_state *regs = cur_regs(env), *reg;
> + struct tnum range = tnum_range(0, U32_MAX);
why U32_MAX, instead of the actual size of the map?
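E.g. something like the below (just a sketch; it would have to be
computed after the map pointer check a few lines further down):

  range = tnum_range(0, map->max_entries - 1);
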
> + struct bpf_map *map = meta->map_ptr;
> + u64 val;
> +
> + if (func_id != BPF_FUNC_tail_call)
> + return 0;
> + if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
> + verbose(env, "kernel subsystem misconfigured verifier\n");
> + return -EINVAL;
> + }
> +
> + reg = &regs[BPF_REG_3];
> + if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
> + bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
> + return 0;
> + }
> +
> + val = reg->var_off.value;
> + if (bpf_map_key_unseen(aux))
> + bpf_map_key_store(aux, val);
> + else if (bpf_map_key_immediate(aux) != val)
> + bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
imo, checking for poison first would make this logic a bit more
straightforward (and would avoid unnecessary key_store calls, but
that's minor)
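I.e. roughly (sketch only):

  val = reg->var_off.value;
  if (bpf_map_key_poisoned(aux))
          return 0;
  if (bpf_map_key_unseen(aux))
          bpf_map_key_store(aux, val);
  else if (bpf_map_key_immediate(aux) != val)
          bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
  return 0;
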
> + return 0;
> +}
> +
> static int check_reference_leak(struct bpf_verifier_env *env)
> {
> struct bpf_func_state *state = cur_func(env);
[...]