Message-ID: <8a7a1960-1678-6949-b4e0-b3621d31ea9c@linux.dev>
Date: Thu, 27 Jul 2023 20:52:25 -0700
From: Yonghong Song <yonghong.song@...ux.dev>
To: Arnd Bergmann <arnd@...nel.org>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
Song Liu <song@...nel.org>,
Steven Rostedt <rostedt@...dmis.org>,
Masami Hiramatsu <mhiramat@...nel.org>
Cc: Arnd Bergmann <arnd@...db.de>, stable@...r.kernel.org,
John Fastabend <john.fastabend@...il.com>,
Martin KaFai Lau <martin.lau@...ux.dev>,
Yonghong Song <yhs@...com>, KP Singh <kpsingh@...nel.org>,
Stanislav Fomichev <sdf@...gle.com>,
Hao Luo <haoluo@...gle.com>, Jiri Olsa <jolsa@...nel.org>,
Kumar Kartikeya Dwivedi <memxor@...il.com>,
Dave Marchevsky <davemarchevsky@...com>,
David Vernet <void@...ifault.com>,
Kees Cook <keescook@...omium.org>, bpf@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-trace-kernel@...r.kernel.org
Subject: Re: [PATCH] [v4] bpf: fix bpf_probe_read_kernel prototype mismatch
On 7/25/23 1:41 PM, Arnd Bergmann wrote:
> From: Arnd Bergmann <arnd@...db.de>
>
> bpf_probe_read_kernel() has a __weak definition in core.c and another
> definition with an incompatible prototype in kernel/trace/bpf_trace.c,
> when CONFIG_BPF_EVENTS is enabled.
>
> Since the two are incompatible, there cannot be a shared declaration in
> a header file, but the lack of a prototype causes a W=1 warning:
>
> kernel/bpf/core.c:1638:12: error: no previous prototype for 'bpf_probe_read_kernel' [-Werror=missing-prototypes]
>
> On 32-bit architectures, the local prototype
>
> u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
>
> passes its arguments in different registers than the one in bpf_trace.c
>
> BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
> const void *, unsafe_ptr)
>
> which uses 64-bit arguments in pairs of registers.
>
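[ Side note for readers less familiar with the helper plumbing: the
  clash boils down to something like the sketch below.  This is a
  simplified illustration, not the literal BPF_CALL_x expansion, and
  the r1..r3 names are just placeholders. ]

	/* View created by BPF_CALL_3() in kernel/trace/bpf_trace.c:
	 * every argument is widened to u64, so on a 32-bit target each
	 * one needs a register pair.
	 */
	u64 bpf_probe_read_kernel(u64 r1, u64 r2, u64 r3);

	/* View of the __weak fallback in kernel/bpf/core.c: natural C
	 * types, so dst, size and unsafe_ptr each fit in a single
	 * 32-bit register and therefore land in different registers
	 * than above.
	 */
	u64 bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr);
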
> As both versions of the function are fairly simple and only really
> differ in one line, just move them into a header file as an inline
> function that does not add any overhead for the bpf_trace.c callers
> and actually avoids a function call for the other one.
>
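[ Hand-expanded illustration of why the shared inline works out for
  both configurations; this is what the helper effectively reduces to
  after constant folding of IS_ENABLED(), not literal kernel code. ]

	/* CONFIG_BPF_EVENTS=y: same body the bpf_trace.c copy had,
	 * so the BPF_CALL_3() caller sees no extra overhead.
	 */
	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	/* CONFIG_BPF_EVENTS=n: folds down to the old __weak stub, but
	 * now inline, so the interpreter caller avoids a function call.
	 */
	memset(dst, 0, size);
	ret = -EFAULT;
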
> Cc: stable@...r.kernel.org
> Link: https://lore.kernel.org/all/ac25cb0f-b804-1649-3afb-1dc6138c2716@iogearbox.net/
> Signed-off-by: Arnd Bergmann <arnd@...db.de>
LGTM, but there are some additional changes in the kernel, so you need
to rebase on top of the master branch and fold in the following
additional change:
@@ -2082,7 +2076,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
 		CONT;							\
 	LDX_PROBE_MEMSX_##SIZEOP:					\
-		bpf_probe_read_kernel(&DST, sizeof(SIZE),		\
+		bpf_probe_read_kernel_common(&DST, sizeof(SIZE),	\
 			      (const void *)(long) (SRC + insn->off));	\
 		DST = *((SIZE *)&DST);					\
 		CONT;

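[ For context while rebasing: with the macro plumbing stripped away,
  the sign-extending probe load above reduces to roughly the
  following.  DST and SRC are the interpreter's u64 registers, and
  SIZE is instantiated with the signed s8/s16/s32 types for the
  _MEMSX cases; illustration only, not the exact macro body. ]

	/* Probe-read sizeof(SIZE) bytes into the destination register
	 * (the shared helper zeroes them on a faulting access), then
	 * reinterpret those bytes as the signed SIZE so the assignment
	 * sign-extends the loaded value into the full 64-bit register.
	 */
	bpf_probe_read_kernel_common(&DST, sizeof(SIZE),
				     (const void *)(long) (SRC + insn->off));
	DST = *((SIZE *)&DST);
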
Thanks!
> --
> v4: rewrite again to use a shared inline helper
> v3: clarify changelog text further.
> v2: rewrite completely to fix the mismatch.
> ---
> include/linux/bpf.h | 12 ++++++++++++
> kernel/bpf/core.c | 10 ++--------
> kernel/trace/bpf_trace.c | 11 -----------
> 3 files changed, 14 insertions(+), 19 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index ceaa8c23287fc..abe75063630b8 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -2661,6 +2661,18 @@ static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
> }
> #endif /* CONFIG_BPF_SYSCALL */
>
> +static __always_inline int
> +bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
> +{
> + int ret = -EFAULT;
> +
> + if (IS_ENABLED(CONFIG_BPF_EVENTS))
> + ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
> + if (unlikely(ret < 0))
> + memset(dst, 0, size);
> + return ret;
> +}
> +
> void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
> struct btf_mod_pair *used_btfs, u32 len);
>
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index dd70c58c9d3a3..9cdf53bfb8bd3 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -1634,12 +1634,6 @@ bool bpf_opcode_in_insntable(u8 code)
> }
>
> #ifndef CONFIG_BPF_JIT_ALWAYS_ON
> -u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
> -{
> - memset(dst, 0, size);
> - return -EFAULT;
> -}
> -
> /**
> * ___bpf_prog_run - run eBPF program on a given context
> * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
> @@ -1930,8 +1924,8 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
> DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
> CONT; \
> LDX_PROBE_MEM_##SIZEOP: \
> - bpf_probe_read_kernel(&DST, sizeof(SIZE), \
> - (const void *)(long) (SRC + insn->off)); \
> + bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
> + (const void *)(long) (SRC + insn->off)); \
> DST = *((SIZE *)&DST); \
> CONT;
>
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index c92eb8c6ff08d..83bde2475ae54 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -223,17 +223,6 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = {
> .arg3_type = ARG_ANYTHING,
> };
>
> -static __always_inline int
> -bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
> -{
> - int ret;
> -
> - ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
> - if (unlikely(ret < 0))
> - memset(dst, 0, size);
> - return ret;
> -}
> -
> BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
> const void *, unsafe_ptr)
> {