Message-ID: <CAEf4BzZYepTYLN6LrPAAaOXUtCBv07bQQJzgarntu03L+cj2GQ@mail.gmail.com>
Date: Mon, 7 Feb 2022 10:59:14 -0800
From: Andrii Nakryiko <andrii.nakryiko@...il.com>
To: Jiri Olsa <jolsa@...hat.com>
Cc: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Networking <netdev@...r.kernel.org>, bpf <bpf@...r.kernel.org>,
lkml <linux-kernel@...r.kernel.org>,
Martin KaFai Lau <kafai@...com>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...omium.org>,
Steven Rostedt <rostedt@...dmis.org>,
Jiri Olsa <olsajiri@...il.com>
Subject: Re: [PATCH 1/8] bpf: Add support to attach kprobe program with fprobe
On Wed, Feb 2, 2022 at 5:53 AM Jiri Olsa <jolsa@...hat.com> wrote:
>
> Adding a new link type, BPF_LINK_TYPE_FPROBE, that attaches a kprobe
> program through the fprobe API.
>
> The fprobe API allows attaching a probe to multiple functions at once
> very quickly, because it works on top of ftrace. On the other hand, this
> limits the probe point to the function entry or return.
>
> The kprobe program gets the same pt_regs input ctx as when it's attached
> through the perf API.
>
> Adding a new attach type, BPF_TRACE_FPROBE, that enables such a link for
> kprobe programs.
>
> The user provides an array of addresses or symbols, with a count, to
> attach the kprobe program to. The new link_create uapi interface looks
> like:
>
> struct {
>         __aligned_u64 syms;
>         __aligned_u64 addrs;
>         __u32 cnt;
>         __u32 flags;
> } fprobe;
>
> The flags field allows a single bit, BPF_F_FPROBE_RETURN, to create a
> return fprobe.
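
For what it's worth, from the user-space side this would look roughly like
below (just a sketch to illustrate the proposed uapi; prog_fd is assumed to
come from an earlier BPF_PROG_LOAD of a kprobe program loaded with
expected_attach_type set to BPF_TRACE_FPROBE):

        const char *syms[] = { "ksys_read", "ksys_write" };
        union bpf_attr attr = {};
        int link_fd;

        attr.link_create.prog_fd = prog_fd;
        attr.link_create.attach_type = BPF_TRACE_FPROBE;
        attr.link_create.fprobe.syms = (__u64)(unsigned long)syms;
        attr.link_create.fprobe.cnt = 2;
        /* or fill fprobe.addrs instead of fprobe.syms; setting
         * BPF_F_FPROBE_RETURN in fprobe.flags would request the
         * return probe variant
         */
        link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
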
>
> Signed-off-by: Masami Hiramatsu <mhiramat@...nel.org>
> Signed-off-by: Jiri Olsa <jolsa@...nel.org>
> ---
> include/linux/bpf_types.h | 1 +
> include/uapi/linux/bpf.h | 13 ++
> kernel/bpf/syscall.c | 248 ++++++++++++++++++++++++++++++++-
> tools/include/uapi/linux/bpf.h | 13 ++
> 4 files changed, 270 insertions(+), 5 deletions(-)
>
[...]
>
> +#ifdef CONFIG_FPROBE
> +
> +struct bpf_fprobe_link {
> +        struct bpf_link link;
> +        struct fprobe fp;
> +        unsigned long *addrs;
> +};
> +
> +static void bpf_fprobe_link_release(struct bpf_link *link)
> +{
> +        struct bpf_fprobe_link *fprobe_link;
> +
> +        fprobe_link = container_of(link, struct bpf_fprobe_link, link);
> +        unregister_fprobe(&fprobe_link->fp);
> +}
> +
> +static void bpf_fprobe_link_dealloc(struct bpf_link *link)
> +{
> +        struct bpf_fprobe_link *fprobe_link;
> +
> +        fprobe_link = container_of(link, struct bpf_fprobe_link, link);
> +        kfree(fprobe_link->addrs);
> +        kfree(fprobe_link);
> +}
> +
> +static const struct bpf_link_ops bpf_fprobe_link_lops = {
> +        .release = bpf_fprobe_link_release,
> +        .dealloc = bpf_fprobe_link_dealloc,
> +};
> +
should this whole new link implementation (including
fprobe_link_prog_run() below) maybe live in kernel/trace/bpf_trace.c?
Seems a bit more fitting than kernel/bpf/syscall.c
> +static int fprobe_link_prog_run(struct bpf_fprobe_link *fprobe_link,
> +                                struct pt_regs *regs)
> +{
> +        int err;
> +
> +        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
> +                err = 0;
> +                goto out;
> +        }
> +
> +        rcu_read_lock();
> +        migrate_disable();
> +        err = bpf_prog_run(fprobe_link->link.prog, regs);
> +        migrate_enable();
> +        rcu_read_unlock();
> +
> + out:
> +        __this_cpu_dec(bpf_prog_active);
> +        return err;
> +}
> +
> +static void fprobe_link_entry_handler(struct fprobe *fp, unsigned long entry_ip,
> +                                      struct pt_regs *regs)
> +{
> +        unsigned long saved_ip = instruction_pointer(regs);
> +        struct bpf_fprobe_link *fprobe_link;
> +
> +        /*
> +         * Because fprobe's regs->ip is set to the instruction following the
> +         * dynamic-ftrace instruction, the correct entry ip must be set, so
> +         * that the bpf program can access the entry address via regs, the
> +         * same as with kprobes.
> +         */
> +        instruction_pointer_set(regs, entry_ip);
> +
> +        fprobe_link = container_of(fp, struct bpf_fprobe_link, fp);
> +        fprobe_link_prog_run(fprobe_link, regs);
> +
> +        instruction_pointer_set(regs, saved_ip);
> +}
> +
> +static void fprobe_link_exit_handler(struct fprobe *fp, unsigned long entry_ip,
> +                                     struct pt_regs *regs)
isn't it identical to fprobe_link_entry_handler? Maybe use one callback
for both entry and exit?
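E.g., something like below (an untested sketch; fprobe_link_handler would be
the current entry handler body under a generic name, assuming struct fprobe
keeps separate entry/exit callback fields that can point to the same
function):

        link->fp.entry_handler = fprobe_link_handler;
        link->fp.exit_handler = fprobe_link_handler;
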
> +{
> +        unsigned long saved_ip = instruction_pointer(regs);
> +        struct bpf_fprobe_link *fprobe_link;
> +
> +        instruction_pointer_set(regs, entry_ip);
> +
> +        fprobe_link = container_of(fp, struct bpf_fprobe_link, fp);
> +        fprobe_link_prog_run(fprobe_link, regs);
> +
> +        instruction_pointer_set(regs, saved_ip);
> +}
> +
> +static int fprobe_resolve_syms(const void *usyms, u32 cnt,
> +                               unsigned long *addrs)
> +{
> +        unsigned long addr, size;
> +        const char **syms;
> +        int err = -ENOMEM;
> +        unsigned int i;
> +        char *func;
> +
> +        size = cnt * sizeof(*syms);
> +        syms = kzalloc(size, GFP_KERNEL);
any reason not to use kvzalloc() here?
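E.g. (sketch):

        syms = kvzalloc(size, GFP_KERNEL);
        ...
        kvfree(syms);

kvzalloc() falls back to vmalloc for bigger allocations, so a large cnt from
user space won't fail just because of kmalloc size limits.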
> +        if (!syms)
> +                return -ENOMEM;
> +
[...]
> +
> +static int bpf_fprobe_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
> +{
> +        struct bpf_fprobe_link *link = NULL;
> +        struct bpf_link_primer link_primer;
> +        unsigned long *addrs;
> +        u32 flags, cnt, size;
> +        void __user *uaddrs;
> +        void __user *usyms;
> +        int err;
> +
> +        /* no support for 32bit archs yet */
> +        if (sizeof(u64) != sizeof(void *))
> +                return -EINVAL;
-EOPNOTSUPP?
> +
> +        if (prog->expected_attach_type != BPF_TRACE_FPROBE)
> +                return -EINVAL;
> +
> +        flags = attr->link_create.fprobe.flags;
> +        if (flags & ~BPF_F_FPROBE_RETURN)
> +                return -EINVAL;
> +
> +        uaddrs = u64_to_user_ptr(attr->link_create.fprobe.addrs);
> +        usyms = u64_to_user_ptr(attr->link_create.fprobe.syms);
> +        if ((!uaddrs && !usyms) || (uaddrs && usyms))
> +                return -EINVAL;
!!uaddrs == !!usyms ?
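i.e. the check could read (same semantics, just more compact; exactly one of
the two has to be set):

        if (!!uaddrs == !!usyms)
                return -EINVAL;
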
> +
> +        cnt = attr->link_create.fprobe.cnt;
> +        if (!cnt)
> +                return -EINVAL;
> +
> +        size = cnt * sizeof(*addrs);
> +        addrs = kzalloc(size, GFP_KERNEL);
same, why not kvzalloc? Also, aren't you overwriting each addrs entry
anyway, so "z" is not necessary, right?
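e.g. (sketch, with a matching kvfree() in bpf_fprobe_link_dealloc()):

        addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);

kvmalloc_array() would also take care of the cnt * sizeof(*addrs)
multiplication overflow for you.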
> +        if (!addrs)
> +                return -ENOMEM;
> +
[...]