Message-ID: <Y07raim32wOBRGPi@google.com>
Date: Tue, 18 Oct 2022 11:07:38 -0700
From: sdf@...gle.com
To: Kees Cook <keescook@...omium.org>
Cc: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
John Fastabend <john.fastabend@...il.com>,
Andrii Nakryiko <andrii@...nel.org>,
Martin KaFai Lau <martin.lau@...ux.dev>,
Song Liu <song@...nel.org>, Yonghong Song <yhs@...com>,
KP Singh <kpsingh@...nel.org>, Hao Luo <haoluo@...gle.com>,
Jiri Olsa <jolsa@...nel.org>, bpf@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-hardening@...r.kernel.org
Subject: Re: [PATCH] bpf: Use kmalloc_size_roundup() to match ksize() usage
On 10/18, Kees Cook wrote:
> Round up allocations with kmalloc_size_roundup() so that the verifier's
> use of ksize() is always accurate and no special handling of the memory
> is needed by KASAN, UBSAN_BOUNDS, nor FORTIFY_SOURCE. Pass the new size
> information back up to callers so they can use the space immediately,
> so that array resizing happens less frequently as well. Explicitly zero
> any trailing bytes in new allocations.
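To restate the ksize() contract this relies on (purely illustrative;
the 56/64 numbers assume the usual slab bucket layout):

	/* A 56-byte request is served from the 64-byte bucket, so
	 * ksize() reports 64.  Rounding up front keeps the requested
	 * size and ksize() in sync, which is what keeps KASAN and
	 * FORTIFY_SOURCE quiet when the slack is actually used.
	 */
	size_t alloc = kmalloc_size_roundup(56);	/* 64 on common configs */
	char *p = kmalloc(alloc, GFP_KERNEL);

	if (p)
		WARN_ON(ksize(p) != alloc);	/* the invariant the patch wants */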
> Additionally fix a memory allocation leak: if krealloc() fails, "arr"
> wasn't freed, but NULL was returned to the caller; the caller of
> realloc_array() would then write NULL to the lvalue, losing the
> reference to the original memory.
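The leak fix makes sense. In isolation it is (simplified illustration,
not the patch code; "grow" is a made-up helper):

	static void *grow(void *arr, size_t new_size)
	{
		void *new_arr = krealloc(arr, new_size, GFP_KERNEL);

		if (!new_arr) {
			kfree(arr);	/* krealloc() left the old buffer alive */
			return NULL;
		}
		return new_arr;
	}

Overwriting the only pointer with krealloc()'s NULL return is what
leaked the original buffer.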
> Cc: Alexei Starovoitov <ast@...nel.org>
> Cc: Daniel Borkmann <daniel@...earbox.net>
> Cc: John Fastabend <john.fastabend@...il.com>
> Cc: Andrii Nakryiko <andrii@...nel.org>
> Cc: Martin KaFai Lau <martin.lau@...ux.dev>
> Cc: Song Liu <song@...nel.org>
> Cc: Yonghong Song <yhs@...com>
> Cc: KP Singh <kpsingh@...nel.org>
> Cc: Stanislav Fomichev <sdf@...gle.com>
> Cc: Hao Luo <haoluo@...gle.com>
> Cc: Jiri Olsa <jolsa@...nel.org>
> Cc: bpf@...r.kernel.org
> Signed-off-by: Kees Cook <keescook@...omium.org>
> ---
> kernel/bpf/verifier.c | 49 +++++++++++++++++++++++++++----------------
> 1 file changed, 31 insertions(+), 18 deletions(-)
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 014ee0953dbd..8a0b60207d0e 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -1000,42 +1000,53 @@ static void print_insn_state(struct bpf_verifier_env *env,
> */
> static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
> {
> - size_t bytes;
> + size_t src_bytes, dst_bytes;
> if (ZERO_OR_NULL_PTR(src))
> goto out;
> - if (unlikely(check_mul_overflow(n, size, &bytes)))
> + if (unlikely(check_mul_overflow(n, size, &src_bytes)))
> return NULL;
> - if (ksize(dst) < bytes) {
> + dst_bytes = kmalloc_size_roundup(src_bytes);
> + if (ksize(dst) < dst_bytes) {
Why not simply do the following here?

	if (ksize(dst) < ksize(src)) {

It seems like we only care about src_bytes/bytes in this case, so maybe
move that check_mul_overflow() under this branch as well?
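Completely untested sketch of what I mean; though writing it out, the
memcpy() below still needs the byte count, so check_mul_overflow()
probably has to stay up front after all:

	static void *copy_array(void *dst, const void *src, size_t n,
				size_t size, gfp_t flags)
	{
		size_t bytes;

		if (ZERO_OR_NULL_PTR(src))
			goto out;

		if (unlikely(check_mul_overflow(n, size, &bytes)))
			return NULL;

		/* dst can already hold everything src's allocation holds */
		if (ksize(dst) < ksize(src)) {
			kfree(dst);
			dst = kmalloc_track_caller(kmalloc_size_roundup(bytes), flags);
			if (!dst)
				return NULL;
		}

		memcpy(dst, src, bytes);
		memset(dst + bytes, 0, ksize(dst) - bytes);
	out:
		return dst ? dst : ZERO_SIZE_PTR;
	}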
> kfree(dst);
> - dst = kmalloc_track_caller(bytes, flags);
> + dst = kmalloc_track_caller(dst_bytes, flags);
> if (!dst)
> return NULL;
> }
> - memcpy(dst, src, bytes);
> + memcpy(dst, src, src_bytes);
> + memset(dst + src_bytes, 0, dst_bytes - src_bytes);
> out:
> return dst ? dst : ZERO_SIZE_PTR;
> }
> -/* resize an array from old_n items to new_n items. the array is reallocated if it's too
> - * small to hold new_n items. new items are zeroed out if the array grows.
> +/* Resize an array from old_n items to *new_n items. The array is reallocated if it's too
> + * small to hold *new_n items. New items are zeroed out if the array grows. Allocation
> + * is rounded up to next kmalloc bucket size to reduce frequency of resizing. *new_n
> + * contains the new total number of items that will fit.
> *
> - * Contrary to krealloc_array, does not free arr if new_n is zero.
> + * Contrary to krealloc, does not free arr if new_n is zero.
> */
> -static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
> +static void *realloc_array(void *arr, size_t old_n, size_t *new_n, size_t size)
> {
> - if (!new_n || old_n == new_n)
> + void *old_arr = arr;
> + size_t alloc_size;
> +
> + if (!new_n || !*new_n || old_n == *new_n)
> goto out;
[..]
> - arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
> - if (!arr)
> + alloc_size = kmalloc_size_roundup(size_mul(*new_n, size));
> + arr = krealloc(old_arr, alloc_size, GFP_KERNEL);
> + if (!arr) {
> + kfree(old_arr);
> return NULL;
> + }
Any reason not to hide this complexity behind krealloc_array()? Why
can't it take care of those roundup details?
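Something like this, just to illustrate (hypothetical, no such helper
exists today; assumes size is non-zero):

	static inline void *krealloc_array_roundup(void *p, size_t *new_n,
						   size_t size, gfp_t flags)
	{
		size_t alloc = kmalloc_size_roundup(size_mul(*new_n, size));
		void *ret = krealloc(p, alloc, flags);

		if (ret)
			*new_n = alloc / size;	/* report back the real capacity */
		return ret;
	}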
> - if (new_n > old_n)
> - memset(arr + old_n * size, 0, (new_n - old_n) * size);
> + *new_n = alloc_size / size;
> + if (*new_n > old_n)
> + memset(arr + old_n * size, 0, (*new_n - old_n) * size);
> out:
> return arr ? arr : ZERO_SIZE_PTR;
> @@ -1067,7 +1078,7 @@ static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_st
> static int resize_reference_state(struct bpf_func_state *state, size_t n)
> {
> - state->refs = realloc_array(state->refs, state->acquired_refs, n,
> + state->refs = realloc_array(state->refs, state->acquired_refs, &n,
> sizeof(struct bpf_reference_state));
> if (!state->refs)
> return -ENOMEM;
> @@ -1083,11 +1094,11 @@ static int grow_stack_state(struct bpf_func_state *state, int size)
> if (old_n >= n)
> return 0;
> - state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
> + state->stack = realloc_array(state->stack, old_n, &n, sizeof(struct bpf_stack_state));
> if (!state->stack)
> return -ENOMEM;
> - state->allocated_stack = size;
> + state->allocated_stack = n * BPF_REG_SIZE;
> return 0;
> }
> @@ -2499,9 +2510,11 @@ static int push_jmp_history(struct bpf_verifier_env *env,
> {
> u32 cnt = cur->jmp_history_cnt;
> struct bpf_idx_pair *p;
> + size_t size;
> cnt++;
> - p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
> + size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
> + p = krealloc(cur->jmp_history, size, GFP_USER);
> if (!p)
> return -ENOMEM;
> p[cnt - 1].idx = env->insn_idx;
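This hunk open-codes the same roundup dance again, which I think
strengthens the krealloc_array() point above: the patch now repeats
kmalloc_size_roundup() by hand at three call sites.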
> --
> 2.34.1