Message-Id: <20210701110118.2de4bb9352ca7e8cb86ee6de@kernel.org>
Date: Thu, 1 Jul 2021 11:01:18 +0900
From: Masami Hiramatsu <mhiramat@...nel.org>
To: Yonghong Song <yhs@...com>
Cc: Jiri Olsa <jolsa@...hat.com>, Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andriin@...com>, <netdev@...r.kernel.org>,
<bpf@...r.kernel.org>, Martin KaFai Lau <kafai@...com>,
Song Liu <songliubraving@...com>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...omium.org>
Subject: Re: [PATCH bpf-next 4/5] bpf: Add bpf_get_func_ip helper for kprobe
programs
On Wed, 30 Jun 2021 18:45:46 -0700
Yonghong Song <yhs@...com> wrote:
>
>
> On 6/30/21 4:58 PM, Masami Hiramatsu wrote:
> > On Wed, 30 Jun 2021 10:47:01 -0700
> > Yonghong Song <yhs@...com> wrote:
> >
> >>
> >>
> >> On 6/29/21 12:29 PM, Jiri Olsa wrote:
> >>> Adding bpf_get_func_ip helper for BPF_PROG_TYPE_KPROBE programs,
> >>> so it's now possible to call bpf_get_func_ip from both kprobe and
> >>> kretprobe programs.
> >>>
> >>> Taking the caller's address from 'struct kprobe::addr', which is
> >>> defined for both kprobe and kretprobe.
> >>>
> >>> Signed-off-by: Jiri Olsa <jolsa@...nel.org>
> >>> ---
> >>> include/uapi/linux/bpf.h | 2 +-
> >>> kernel/bpf/verifier.c | 2 ++
> >>> kernel/trace/bpf_trace.c | 14 ++++++++++++++
> >>> kernel/trace/trace_kprobe.c | 20 ++++++++++++++++++--
> >>> kernel/trace/trace_probe.h | 5 +++++
> >>> tools/include/uapi/linux/bpf.h | 2 +-
> >>> 6 files changed, 41 insertions(+), 4 deletions(-)
> >>>
> >>> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> >>> index 83e87ffdbb6e..4894f99a1993 100644
> >>> --- a/include/uapi/linux/bpf.h
> >>> +++ b/include/uapi/linux/bpf.h
> >>> @@ -4783,7 +4783,7 @@ union bpf_attr {
> >>> *
> >>> * u64 bpf_get_func_ip(void *ctx)
> >>> * Description
> >>> - * Get address of the traced function (for tracing programs).
> >>> + * Get address of the traced function (for tracing and kprobe programs).
> >>> * Return
> >>> * Address of the traced function.
> >>> */
> >>> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> >>> index 701ff7384fa7..b66e0a7104f8 100644
> >>> --- a/kernel/bpf/verifier.c
> >>> +++ b/kernel/bpf/verifier.c
> >>> @@ -5979,6 +5979,8 @@ static bool has_get_func_ip(struct bpf_verifier_env *env)
> >>> return -ENOTSUPP;
> >>> }
> >>> return 0;
> >>> + } else if (type == BPF_PROG_TYPE_KPROBE) {
> >>> + return 0;
> >>> }
> >>>
> >>> verbose(env, "func %s#%d not supported for program type %d\n",
> >>> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> >>> index 9edd3b1a00ad..1a5bddce9abd 100644
> >>> --- a/kernel/trace/bpf_trace.c
> >>> +++ b/kernel/trace/bpf_trace.c
> >>> @@ -961,6 +961,18 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
> >>> .arg1_type = ARG_PTR_TO_CTX,
> >>> };
> >>>
> >>> +BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
> >>> +{
> >>> + return trace_current_kprobe_addr();
> >>> +}
> >>> +
> >>> +static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
> >>> + .func = bpf_get_func_ip_kprobe,
> >>> + .gpl_only = true,
> >>> + .ret_type = RET_INTEGER,
> >>> + .arg1_type = ARG_PTR_TO_CTX,
> >>> +};
> >>> +
> >>> const struct bpf_func_proto *
> >>> bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> >>> {
> >>> @@ -1092,6 +1104,8 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> >>> case BPF_FUNC_override_return:
> >>> return &bpf_override_return_proto;
> >>> #endif
> >>> + case BPF_FUNC_get_func_ip:
> >>> + return &bpf_get_func_ip_proto_kprobe;
> >>> default:
> >>> return bpf_tracing_func_proto(func_id, prog);
> >>> }
> >>> diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
> >>> index ea6178cb5e33..b07d5888db14 100644
> >>> --- a/kernel/trace/trace_kprobe.c
> >>> +++ b/kernel/trace/trace_kprobe.c
> >>> @@ -1570,6 +1570,18 @@ static int kretprobe_event_define_fields(struct trace_event_call *event_call)
> >>> }
> >>>
> >>> #ifdef CONFIG_PERF_EVENTS
> >>> +/* Used by bpf get_func_ip helper */
> >>> +DEFINE_PER_CPU(u64, current_kprobe_addr) = 0;
> >>
> >> I didn't check other architectures, but this should work
> >> for x86, where, if a nested kprobe happens, the second
> >> kprobe will not call the kprobe handlers.
> >
> > No problem, other architectures also do not call nested kprobe handlers.
> > However, you don't need this per-cpu variable, because you can use
> > kprobe_running() in kprobe context:
> >
> > 	kp = kprobe_running();
> > 	if (kp)
> > 		return kp->addr;
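> >
> > For reference, kprobe_running() is basically just a read of the per-CPU
> > current_kprobe pointer (roughly, as in include/linux/kprobes.h), so it is
> > cheap to call from the handler itself:
> >
> > 	static inline struct kprobe *kprobe_running(void)
> > 	{
> > 		return __this_cpu_read(current_kprobe);
> > 	}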
> >
> > BTW, I'm not sure why you don't use instruction_pointer(regs)?
>
> How about kretprobe? I guess kp->addr should still point to the
> function address, but instruction_pointer(regs) does not?
A fix for that is now under review (waiting to be merged); please check this series:
https://lore.kernel.org/bpf/162400000592.506599.4695807810528866713.stgit@devnote2/
Then you can use instruction_pointer(regs) in kretprobes too.
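
For example, a minimal sketch of the kprobe variant without the per-cpu
variable (untested, assuming the same proto as in this patch) would be:

	BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
	{
		struct kprobe *kp = kprobe_running();

		/* kprobe_running() is non-NULL in kprobe/kretprobe handler context. */
		return kp ? (u64) kp->addr : 0;
	}

And once the series above is merged, instruction_pointer(regs) would give
the same address in kretprobe handlers too.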
Thank you,
>
> >
> > Thank you,
> >
> >>
> >> This is essentially a way to provide an additional parameter to
> >> the bpf program. Andrii is developing a mechanism to
> >> save arbitrary data in the *current task_struct*, which
> >> might be used here to store current_kprobe_addr, so we could
> >> save one per-cpu variable.
> >>
> >>> +
> >>> +u64 trace_current_kprobe_addr(void)
> >>> +{
> >>> + return *this_cpu_ptr(&current_kprobe_addr);
> >>> +}
> >>> +
> >>> +static void trace_current_kprobe_set(struct trace_kprobe *tk)
> >>> +{
> >>> + __this_cpu_write(current_kprobe_addr, (u64) tk->rp.kp.addr);
> >>> +}
> >>>
> >>> /* Kprobe profile handler */
> >>> static int
> >>> @@ -1585,6 +1597,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
> >>> unsigned long orig_ip = instruction_pointer(regs);
> >>> int ret;
> >>>
> >>> + trace_current_kprobe_set(tk);
> >>> ret = trace_call_bpf(call, regs);
> >>>
> >>> /*
> >>> @@ -1631,8 +1644,11 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
> >>> int size, __size, dsize;
> >>> int rctx;
> >>>
> >>> - if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
> >>> - return;
> >>> + if (bpf_prog_array_valid(call)) {
> >>> + trace_current_kprobe_set(tk);
> >>> + if (!trace_call_bpf(call, regs))
> >>> + return;
> >>> + }
> >>>
> >>> head = this_cpu_ptr(call->perf_events);
> >>> if (hlist_empty(head))
> >> [...]
> >
> >
--
Masami Hiramatsu <mhiramat@...nel.org>