Message-ID: <4f4136f5-db54-f541-2843-ccb35be25ab4@fb.com>
Date: Tue, 10 Sep 2019 08:37:19 +0000
From: Yonghong Song <yhs@...com>
To: Sami Tolvanen <samitolvanen@...gle.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>
CC: Kees Cook <keescook@...omium.org>, Martin Lau <kafai@...com>,
Song Liu <songliubraving@...com>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"bpf@...r.kernel.org" <bpf@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH] bpf: validate bpf_func when BPF_JIT is enabled
On 9/9/19 11:32 PM, Sami Tolvanen wrote:
> With CONFIG_BPF_JIT, the kernel makes indirect calls to dynamically
> generated code. This change adds basic sanity checking to ensure
> we are jumping to a valid location, which narrows down the attack
> surface on the stored pointer. This also prepares the code for future
> Control-Flow Integrity (CFI) checking, which adds indirect call
> validation to call targets that can be determined at compile-time, but
> cannot validate calls to jited functions.
>
> In addition, this change adds a weak arch_bpf_jit_check_func function,
> which architectures that implement BPF JIT can override to perform
> additional validation, such as verifying that the pointer points to
> the correct memory region.
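As an aside, the kind of arch-specific check the last paragraph
alludes to could look something like the sketch below (hypothetical,
not part of this patch; MODULES_VADDR/MODULES_END stand in for
whatever region a given arch's JIT actually allocates from):

	/* Hypothetical override: accept only pointers into the region
	 * the arch's JIT allocates executable memory from.
	 */
	bool arch_bpf_jit_check_func(const struct bpf_prog *prog)
	{
		unsigned long addr = (unsigned long)prog->bpf_func;

		return addr >= MODULES_VADDR && addr < MODULES_END;
	}
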
You did not mention BPF_BINARY_HEADER_MAGIC in the commit message,
yet the patch adds a `magic` member to struct bpf_binary_header.
Could you add some details on the purpose of this `magic` member?
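
For context, the magic check only works because the header can be
recovered from the function pointer at all: bpf_jit_binary_hdr() in
filter.h rounds bpf_func down to a page boundary, roughly (paraphrased
from the existing helper):

	static inline struct bpf_binary_header *
	bpf_jit_binary_hdr(const struct bpf_prog *fp)
	{
		/* The jited image starts in the same page as the
		 * header, so masking the function address down to a
		 * page boundary lands on the header.
		 */
		unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;

		return (void *)addr;
	}

So the magic presumably acts as a sanity tag: a corrupted bpf_func
that does not point into a real JIT allocation is unlikely to see the
magic word at the page boundary below it. Spelling that out in the
commit message would help.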
>
> Signed-off-by: Sami Tolvanen <samitolvanen@...gle.com>
> ---
> include/linux/filter.h | 26 ++++++++++++++++++++++++--
> kernel/bpf/core.c | 25 +++++++++++++++++++++++++
> 2 files changed, 49 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/filter.h b/include/linux/filter.h
> index 92c6e31fb008..abfb0e1b21a8 100644
> --- a/include/linux/filter.h
> +++ b/include/linux/filter.h
> @@ -511,7 +511,10 @@ struct sock_fprog_kern {
> struct sock_filter *filter;
> };
>
> +#define BPF_BINARY_HEADER_MAGIC 0x05de0e82
> +
> struct bpf_binary_header {
> + u32 magic;
> u32 pages;
> /* Some arches need word alignment for their instructions */
> u8 image[] __aligned(4);
> @@ -553,20 +556,39 @@ struct sk_filter {
>
> DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
>
> +#ifdef CONFIG_BPF_JIT
> +/*
> + * With JIT, the kernel makes an indirect call to dynamically generated
> + * code. Use bpf_call_func to perform additional validation of the call
> + * target to narrow down attack surface. Architectures implementing BPF
> + * JIT can override arch_bpf_jit_check_func for arch-specific checking.
> + */
> +extern unsigned int bpf_call_func(const struct bpf_prog *prog,
> + const void *ctx);
> +
> +extern bool arch_bpf_jit_check_func(const struct bpf_prog *prog);
> +#else
> +static inline unsigned int bpf_call_func(const struct bpf_prog *prog,
> + const void *ctx)
> +{
> + return prog->bpf_func(ctx, prog->insnsi);
> +}
> +#endif
> +
> #define BPF_PROG_RUN(prog, ctx) ({ \
> u32 ret; \
> cant_sleep(); \
> if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
> struct bpf_prog_stats *stats; \
> u64 start = sched_clock(); \
> - ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
> + ret = bpf_call_func(prog, ctx); \
> stats = this_cpu_ptr(prog->aux->stats); \
> u64_stats_update_begin(&stats->syncp); \
> stats->cnt++; \
> stats->nsecs += sched_clock() - start; \
> u64_stats_update_end(&stats->syncp); \
> } else { \
> - ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
> + ret = bpf_call_func(prog, ctx); \
> } \
> ret; })
>
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index 66088a9e9b9e..7aad58f67105 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -792,6 +792,30 @@ void __weak bpf_jit_free_exec(void *addr)
> module_memfree(addr);
> }
>
> +#ifdef CONFIG_BPF_JIT
> +bool __weak arch_bpf_jit_check_func(const struct bpf_prog *prog)
> +{
> + return true;
> +}
> +
> +unsigned int bpf_call_func(const struct bpf_prog *prog, const void *ctx)
> +{
> + const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
> +
> + if (!IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) && !prog->jited)
> + return prog->bpf_func(ctx, prog->insnsi);
> +
> + if (unlikely(hdr->magic != BPF_BINARY_HEADER_MAGIC ||
> + !arch_bpf_jit_check_func(prog))) {
> + WARN(1, "attempt to jump to an invalid address");
> + return 0;
> + }
> +
> + return prog->bpf_func(ctx, prog->insnsi);
> +}
The above can be rewritten as a single branch. Note that the first
condition has to be inverted and grouped with &&, otherwise the WARN
would fire for every jited program:

	if ((IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) || prog->jited) &&
	    unlikely(hdr->magic != BPF_BINARY_HEADER_MAGIC ||
		     !arch_bpf_jit_check_func(prog))) {
		WARN(1, "attempt to jump to an invalid address");
		return 0;
	}

	return prog->bpf_func(ctx, prog->insnsi);
BPF_PROG_RUN() is called in the XDP fast path, so this is hot code.
Have you measured how much slowdown the above change costs?
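For example, something along these lines (a rough sketch from a test
module; test_prog/test_ctx are placeholders for a jited program and
its context, not names from this patch) would give a ballpark
per-invocation cost before and after the change:

	/* Average the cost of N back-to-back runs. */
	u64 start, delta;
	u32 i, ret = 0;

	start = sched_clock();
	for (i = 0; i < 1000000; i++)
		ret = BPF_PROG_RUN(test_prog, test_ctx);
	delta = sched_clock() - start;
	pr_info("BPF_PROG_RUN: last ret %u, avg %llu ns/run\n",
		ret, delta / 1000000);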
> +EXPORT_SYMBOL_GPL(bpf_call_func);
> +#endif
> +
> struct bpf_binary_header *
> bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
> unsigned int alignment,
> @@ -818,6 +842,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
> /* Fill space with illegal/arch-dep instructions. */
> bpf_fill_ill_insns(hdr, size);
>
> + hdr->magic = BPF_BINARY_HEADER_MAGIC;
> hdr->pages = pages;
> hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
> PAGE_SIZE - sizeof(*hdr));
>