Date:   Wed, 12 Sep 2018 16:17:36 -0700
From:   Alexei Starovoitov <alexei.starovoitov@...il.com>
To:     Joe Stringer <joe@...d.net.nz>
Cc:     daniel@...earbox.net, netdev@...r.kernel.org, ast@...nel.org,
        john.fastabend@...il.com, tgraf@...g.ch, kafai@...com,
        nitin.hande@...il.com, mauricio.vasquez@...ito.it
Subject: Re: [PATCH bpf-next 06/11] bpf: Add reference tracking to verifier

On Tue, Sep 11, 2018 at 05:36:35PM -0700, Joe Stringer wrote:
> Allow helper functions to acquire a reference and return it into a
> register. Specific pointer types such as the PTR_TO_SOCKET will
> implicitly represent such a reference. The verifier must ensure that
> these references are released exactly once in each path through the
> program.
> 
> To achieve this, this commit assigns an id to the pointer and tracks it
> in the 'bpf_func_state', then when the function or program exits,
> verifies that all of the acquired references have been freed. When the
> pointer is passed to a function that frees the reference, it is removed
> from the 'bpf_func_state' and all existing copies of the pointer in
> registers are marked invalid.
> 
> Signed-off-by: Joe Stringer <joe@...d.net.nz>
> ---
>  include/linux/bpf_verifier.h |  24 ++-
>  kernel/bpf/verifier.c        | 303 ++++++++++++++++++++++++++++++++---
>  2 files changed, 306 insertions(+), 21 deletions(-)
> 
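To make the contract concrete: with the sk_lookup/sk_release helpers that I
assume later patches in this series add, a program has to look roughly like
this to get past the verifier (just a sketch, helper names/signatures taken
from the rest of the series):

	SEC("classifier")
	int hold_and_release(struct __sk_buff *skb)
	{
		struct bpf_sock_tuple tuple = {};	/* left empty for brevity */
		struct bpf_sock *sk;

		/* R0 comes back as PTR_TO_SOCKET_OR_NULL with a fresh ref id */
		sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
				       /* netns */ 0, /* flags */ 0);
		if (!sk)
			return TC_ACT_OK;	/* NULL branch: verifier drops the id */

		/* sk is PTR_TO_SOCKET here; any copy of it shares the same id */
		bpf_sk_release(sk);		/* id removed, all copies invalidated */
		return TC_ACT_OK;
	}
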
> diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
> index 23a2b17bfd75..23f222e0cb0b 100644
> --- a/include/linux/bpf_verifier.h
> +++ b/include/linux/bpf_verifier.h
> @@ -104,6 +104,17 @@ struct bpf_stack_state {
>  	u8 slot_type[BPF_REG_SIZE];
>  };
>  
> +struct bpf_reference_state {
> +	/* Track each reference created with a unique id, even if the same
> +	 * instruction creates the reference multiple times (eg, via CALL).
> +	 */
> +	int id;
> +	/* Instruction where the allocation of this reference occurred. This
> +	 * is used purely to inform the user of a reference leak.
> +	 */
> +	int insn_idx;
> +};
> +
>  /* state of the program:
>   * type of all registers and stack info
>   */
> @@ -121,7 +132,9 @@ struct bpf_func_state {
>  	 */
>  	u32 subprogno;
>  
> -	/* should be second to last. See copy_func_state() */
> +	/* The following fields should be last. See copy_func_state() */
> +	int acquired_refs;
> +	struct bpf_reference_state *refs;
>  	int allocated_stack;
>  	struct bpf_stack_state *stack;
>  };
> @@ -217,11 +230,16 @@ __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
>  __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
>  					   const char *fmt, ...);
>  
> -static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
> +static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
>  {
>  	struct bpf_verifier_state *cur = env->cur_state;
>  
> -	return cur->frame[cur->curframe]->regs;
> +	return cur->frame[cur->curframe];
> +}
> +
> +static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
> +{
> +	return cur_func(env)->regs;
>  }
>  
>  int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index faa83b3d7011..67c62ef67d37 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -1,5 +1,6 @@
>  /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
>   * Copyright (c) 2016 Facebook
> + * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
>   *
>   * This program is free software; you can redistribute it and/or
>   * modify it under the terms of version 2 of the GNU General Public
> @@ -140,6 +141,18 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
>   *
>   * After the call R0 is set to return type of the function and registers R1-R5
>   * are set to NOT_INIT to indicate that they are no longer readable.
> + *
> + * The following reference types represent a potential reference to a kernel
> + * resource which, after first being allocated, must be checked and freed by
> + * the BPF program:
> + * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
> + *
> + * When the verifier sees a helper call return a reference type, it allocates a
> + * pointer id for the reference and stores it in the current function state.
> + * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
> + * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
> + * passes through a NULL-check conditional. For the branch wherein the state is
> + * changed to CONST_IMM, the verifier releases the reference.
>   */
>  
>  /* verifier_state + insn_idx are pushed to stack when branch is encountered */
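
The other half of the contract, which the tracking below enforces, is that no
path may reach exit with the reference still held. Roughly (same assumed
helpers as above):

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), 0, 0);
	if (sk && skb->mark == 42) {
		bpf_sk_release(sk);
		return TC_ACT_OK;
	}
	/* rejected: sk may be non-NULL and still referenced when we exit */
	return TC_ACT_OK;
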
> @@ -189,6 +202,7 @@ struct bpf_call_arg_meta {
>  	int access_size;
>  	s64 msize_smax_value;
>  	u64 msize_umax_value;
> +	int ptr_id;
>  };
>  
>  static DEFINE_MUTEX(bpf_verifier_lock);
> @@ -251,7 +265,42 @@ static bool type_is_pkt_pointer(enum bpf_reg_type type)
>  
>  static bool reg_type_may_be_null(enum bpf_reg_type type)
>  {
> -	return type == PTR_TO_MAP_VALUE_OR_NULL;
> +	return type == PTR_TO_MAP_VALUE_OR_NULL ||
> +	       type == PTR_TO_SOCKET_OR_NULL;
> +}
> +
> +static bool type_is_refcounted(enum bpf_reg_type type)
> +{
> +	return type == PTR_TO_SOCKET;
> +}
> +
> +static bool type_is_refcounted_or_null(enum bpf_reg_type type)
> +{
> +	return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
> +}
> +
> +static bool reg_is_refcounted(const struct bpf_reg_state *reg)
> +{
> +	return type_is_refcounted(reg->type);
> +}
> +
> +static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
> +{
> +	return type_is_refcounted_or_null(reg->type);
> +}
> +
> +static bool arg_type_is_refcounted(enum bpf_arg_type type)
> +{
> +	return type == ARG_PTR_TO_SOCKET;
> +}
> +
> +/* Determine whether the function releases some resources allocated by another
> + * function call. The first reference type argument will be assumed to be
> + * released by release_reference().
> + */
> +static bool is_release_function(enum bpf_func_id func_id)
> +{
> +	return false;
>  }
>  
>  /* string representation of 'enum bpf_reg_type' */
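
I assume a later patch in the series fills this in once the release helper
exists, i.e. something like:

	static bool is_release_function(enum bpf_func_id func_id)
	{
		return func_id == BPF_FUNC_sk_release;
	}
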
> @@ -384,6 +433,12 @@ static void print_verifier_state(struct bpf_verifier_env *env,
>  		else
>  			verbose(env, "=%s", types_buf);
>  	}
> +	if (state->acquired_refs && state->refs[0].id) {
> +		verbose(env, " refs=%d", state->refs[0].id);
> +		for (i = 1; i < state->acquired_refs; i++)
> +			if (state->refs[i].id)
> +				verbose(env, ",%d", state->refs[i].id);
> +	}
>  	verbose(env, "\n");
>  }
>  
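
So the state dump line now ends with the list of outstanding reference ids,
e.g. something like:

	... R1=ctx(id=0,off=0,imm=0) R10=fp0 refs=2,4

for two live references with ids 2 and 4.
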
> @@ -402,6 +457,8 @@ static int copy_##NAME##_state(struct bpf_func_state *dst,		\
>  	       sizeof(*src->FIELD) * (src->COUNT / SIZE));		\
>  	return 0;							\
>  }
> +/* copy_reference_state() */
> +COPY_STATE_FN(reference, acquired_refs, refs, 1)
>  /* copy_stack_state() */
>  COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
>  #undef COPY_STATE_FN
> @@ -440,6 +497,8 @@ static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
>  	state->FIELD = new_##FIELD;					\
>  	return 0;							\
>  }
> +/* realloc_reference_state() */
> +REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
>  /* realloc_stack_state() */
>  REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
>  #undef REALLOC_STATE_FN
> @@ -451,16 +510,89 @@ REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
>   * which realloc_stack_state() copies over. It points to previous
>   * bpf_verifier_state which is never reallocated.
>   */
> -static int realloc_func_state(struct bpf_func_state *state, int size,
> -			      bool copy_old)
> +static int realloc_func_state(struct bpf_func_state *state, int stack_size,
> +			      int refs_size, bool copy_old)
>  {
> -	return realloc_stack_state(state, size, copy_old);
> +	int err = realloc_reference_state(state, refs_size, copy_old);
> +	if (err)
> +		return err;
> +	return realloc_stack_state(state, stack_size, copy_old);
> +}
> +
> +/* Acquire a pointer id from the env and update the state->refs to include
> + * this new pointer reference.
> + * On success, returns a valid pointer id to associate with the register
> + * On failure, returns a negative errno.
> + */
> +static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
> +{
> +	struct bpf_func_state *state = cur_func(env);
> +	int new_ofs = state->acquired_refs;
> +	int id, err;
> +
> +	err = realloc_reference_state(state, state->acquired_refs + 1, true);
> +	if (err)
> +		return err;
> +	id = ++env->id_gen;
> +	state->refs[new_ofs].id = id;
> +	state->refs[new_ofs].insn_idx = insn_idx;
> +
> +	return id;
> +}
> +
> +/* release function corresponding to acquire_reference_state(). Idempotent. */
> +static int __release_reference_state(struct bpf_func_state *state, int ptr_id)
> +{
> +	int i, last_idx;
> +
> +	if (!ptr_id)
> +		return 0;

Is this defensive programming, or can this condition actually happen?
As far as I can see, all callers are supposed to pass a valid ptr_id into it.

Acked-by: Alexei Starovoitov <ast@...nel.org>
