[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230830162039.95c575460609cebdc34ab0c1@kernel.org>
Date: Wed, 30 Aug 2023 16:20:39 +0900
From: Masami Hiramatsu (Google) <mhiramat@...nel.org>
To: "Masami Hiramatsu (Google)" <mhiramat@...nel.org>
Cc: Alexei Starovoitov <alexei.starovoitov@...il.com>,
Steven Rostedt <rostedt@...dmis.org>,
Florent Revest <revest@...omium.org>,
linux-trace-kernel@...r.kernel.org,
LKML <linux-kernel@...r.kernel.org>,
Martin KaFai Lau <martin.lau@...ux.dev>,
bpf <bpf@...r.kernel.org>, Sven Schnelle <svens@...ux.ibm.com>,
Alexei Starovoitov <ast@...nel.org>,
Jiri Olsa <jolsa@...nel.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Alan Maguire <alan.maguire@...cle.com>,
Mark Rutland <mark.rutland@....com>,
Peter Zijlstra <peterz@...radead.org>,
Thomas Gleixner <tglx@...utronix.de>
Subject: Re: [PATCH v4 6/9] tracing/fprobe: Enable fprobe events with
CONFIG_DYNAMIC_FTRACE_WITH_ARGS
On Thu, 24 Aug 2023 00:16:37 +0900
"Masami Hiramatsu (Google)" <mhiramat@...nel.org> wrote:
> +#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) || \
> + defined(CONFIG_HAVE_PT_REGS_TO_FTRACE_REGS_CAST)
> +
> +static __always_inline
> +struct pt_regs *perf_fprobe_partial_regs(struct ftrace_regs *fregs)
> +{
> + /* See include/linux/ftrace.h, this returns &fregs->regs */
> + return ftrace_partial_regs(fregs, NULL);
> +}
> +
> +#define perf_fprobe_return_regs(regs) do {} while (0)
> +
> +#else /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS && !CONFIG_HAVE_PT_REGS_TO_FTRACE_REGS_CAST */
> +
> +/* Since fprobe handlers can be nested, pt_regs buffer need to be a stack */
> +#define PERF_FPROBE_REGS_MAX 4
> +
> +struct pt_regs_stack {
> + struct pt_regs regs[PERF_FPROBE_REGS_MAX];
> + int idx;
> +};
> +
> +static DEFINE_PER_CPU(struct pt_regs_stack, perf_fprobe_regs);
> +
> +static __always_inline
> +struct pt_regs *perf_fprobe_partial_regs(struct ftrace_regs *fregs)
> +{
> + struct pt_regs_stack *stack = this_cpu_ptr(&perf_fprobe_regs);
> + struct pt_regs *regs;
> +
> + if (stack->idx < PERF_FPROBE_REGS_MAX) {
> + regs = stack->regs[stack->idx++];
> + return ftrace_partial_regs(fregs, regs);
> + }
> + return NULL;
> +}
> +
> +static __always_inline void perf_fprobe_return_regs(struct pt_regs *regs)
> +{
> + struct pt_regs_stack *stack = this_cpu_ptr(&perf_fprobe_regs);
> +
> + if (WARN_ON_ONCE(regs != stack->regs[stack->idx]))
> + return;
> +
> + --stack->idx;
> +}
Ah, I found that perf_trace_buf_alloc() does the same thing. So
perf_trace_buf_alloc(size, &pt_regs, &rctx);
will give us the pt_regs at that point. Trace events do that, so I think
it is OK to do the same here.
Thank you,
> +
> +#endif /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS || CONFIG_HAVE_PT_REGS_TO_FTRACE_REGS_CAST */
> +
> static int fentry_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
> - struct pt_regs *regs)
> + struct ftrace_regs *fregs)
> {
> struct trace_event_call *call = trace_probe_event_call(&tf->tp);
> struct fentry_trace_entry_head *entry;
> struct hlist_head *head;
> int size, __size, dsize;
> + struct pt_regs *regs;
> int rctx;
>
> + regs = perf_fprobe_partial_regs(fregs);
> + if (!regs)
> + return -EINVAL;
> +
> head = this_cpu_ptr(call->perf_events);
> if (hlist_empty(head))
> - return 0;
> + goto out;
>
> - dsize = __get_data_size(&tf->tp, regs);
> + dsize = __get_data_size(&tf->tp, fregs);
> __size = sizeof(*entry) + tf->tp.size + dsize;
> size = ALIGN(__size + sizeof(u32), sizeof(u64));
> size -= sizeof(u32);
>
> entry = perf_trace_buf_alloc(size, NULL, &rctx);
Here, we can borrow the pt_regs.
> if (!entry)
> - return 0;
> + goto out;
>
> entry->ip = entry_ip;
> memset(&entry[1], 0, dsize);
> - store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize);
> + store_trace_args(&entry[1], &tf->tp, fregs, sizeof(*entry), dsize);
> perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
> head, NULL);
> +out:
> + perf_fprobe_return_regs(regs);
> return 0;
> }
> NOKPROBE_SYMBOL(fentry_perf_func);
>
> static void
> fexit_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
> - unsigned long ret_ip, struct pt_regs *regs)
> + unsigned long ret_ip, struct ftrace_regs *fregs)
> {
> struct trace_event_call *call = trace_probe_event_call(&tf->tp);
> struct fexit_trace_entry_head *entry;
> struct hlist_head *head;
> int size, __size, dsize;
> + struct pt_regs *regs;
> int rctx;
>
> + regs = perf_fprobe_partial_regs(fregs);
> + if (!regs)
> + return;
> +
> head = this_cpu_ptr(call->perf_events);
> if (hlist_empty(head))
> - return;
> + goto out;
>
> - dsize = __get_data_size(&tf->tp, regs);
> + dsize = __get_data_size(&tf->tp, fregs);
> __size = sizeof(*entry) + tf->tp.size + dsize;
> size = ALIGN(__size + sizeof(u32), sizeof(u64));
> size -= sizeof(u32);
>
> entry = perf_trace_buf_alloc(size, NULL, &rctx);
Ditto.
Thanks,
--
Masami Hiramatsu (Google) <mhiramat@...nel.org>
Powered by blists - more mailing lists