[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20231107104755.19a72f896a016f5d4c165f31@kernel.org>
Date: Tue, 7 Nov 2023 10:47:55 +0900
From: Masami Hiramatsu (Google) <mhiramat@...nel.org>
To: "Masami Hiramatsu (Google)" <mhiramat@...nel.org>
Cc: Alexei Starovoitov <alexei.starovoitov@...il.com>,
Steven Rostedt <rostedt@...dmis.org>,
Florent Revest <revest@...omium.org>,
linux-trace-kernel@...r.kernel.org,
LKML <linux-kernel@...r.kernel.org>,
Martin KaFai Lau <martin.lau@...ux.dev>,
bpf <bpf@...r.kernel.org>, Sven Schnelle <svens@...ux.ibm.com>,
Alexei Starovoitov <ast@...nel.org>,
Jiri Olsa <jolsa@...nel.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Alan Maguire <alan.maguire@...cle.com>,
Mark Rutland <mark.rutland@....com>,
Peter Zijlstra <peterz@...radead.org>,
Thomas Gleixner <tglx@...utronix.de>,
Guo Ren <guoren@...nel.org>
Subject: Re: [RFC PATCH 10/32] function_graph: Have the instances use their
own ftrace_ops for filtering
On Mon, 6 Nov 2023 01:08:32 +0900
"Masami Hiramatsu (Google)" <mhiramat@...nel.org> wrote:
> From: Steven Rostedt (VMware) <rostedt@...dmis.org>
>
> Allow for instances to have their own ftrace_ops part of the fgraph_ops that
> makes the function_graph tracer filter on the set_ftrace_filter file of the
> instance and not the top instance.
>
> Signed-off-by: Steven Rostedt (VMware) <rostedt@...dmis.org>
> Signed-off-by: Masami Hiramatsu (Google) <mhiramat@...nel.org>
> ---
> include/linux/ftrace.h | 1 +
> kernel/trace/fgraph.c | 60 +++++++++++++++++++++-------------
> kernel/trace/ftrace.c | 6 ++-
> kernel/trace/trace.h | 16 +++++----
> kernel/trace/trace_functions.c | 2 +
> kernel/trace/trace_functions_graph.c | 8 +++--
> 6 files changed, 58 insertions(+), 35 deletions(-)
>
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index 7fd044ae3da5..9dab365c6023 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -1044,6 +1044,7 @@ extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph
> struct fgraph_ops {
> trace_func_graph_ent_t entryfunc;
> trace_func_graph_ret_t retfunc;
> + struct ftrace_ops ops; /* for the hash lists */
> void *private;
> };
>
> diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
> index 1e8c17f70b84..0642f3281b64 100644
> --- a/kernel/trace/fgraph.c
> +++ b/kernel/trace/fgraph.c
> @@ -17,14 +17,6 @@
> #include "ftrace_internal.h"
> #include "trace.h"
>
> -#ifdef CONFIG_DYNAMIC_FTRACE
> -#define ASSIGN_OPS_HASH(opsname, val) \
> - .func_hash = val, \
> - .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
> -#else
> -#define ASSIGN_OPS_HASH(opsname, val)
> -#endif
> -
> #define FGRAPH_RET_SIZE sizeof(struct ftrace_ret_stack)
> #define FGRAPH_RET_INDEX (FGRAPH_RET_SIZE / sizeof(long))
>
> @@ -338,9 +330,6 @@ int function_graph_enter(unsigned long ret, unsigned long func,
> return -EBUSY;
> #endif
>
> - if (!ftrace_ops_test(&global_ops, func, NULL))
> - return -EBUSY;
> -
> trace.func = func;
> trace.depth = ++current->curr_ret_depth;
>
> @@ -361,7 +350,8 @@ int function_graph_enter(unsigned long ret, unsigned long func,
> atomic_inc(&current->trace_overrun);
> break;
> }
> - if (fgraph_array[i]->entryfunc(&trace, fgraph_array[i])) {
> + if (ftrace_ops_test(&gops->ops, func, NULL) &&
> + gops->entryfunc(&trace, gops)) {
> offset = current->curr_ret_stack;
> /* Check the top level stored word */
> type = get_fgraph_type(current, offset - 1);
> @@ -656,17 +646,25 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
> }
> #endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
>
> -static struct ftrace_ops graph_ops = {
> - .func = ftrace_graph_func,
> - .flags = FTRACE_OPS_FL_INITIALIZED |
> - FTRACE_OPS_FL_PID |
> - FTRACE_OPS_GRAPH_STUB,
> +void fgraph_init_ops(struct ftrace_ops *dst_ops,
> + struct ftrace_ops *src_ops)
> +{
> + dst_ops->func = ftrace_stub;
> + dst_ops->flags = FTRACE_OPS_FL_PID | FTRACE_OPS_FL_STUB;
This needs to use FTRACE_OPS_GRAPH_STUB instead of FTRACE_OPS_FL_STUB,
because commit 0c0593b45c9b ("x86/ftrace: Make function graph use ftrace
directly") introduced this flag to switch the mode. (fgraph on ftrace)
> +
> #ifdef FTRACE_GRAPH_TRAMP_ADDR
> - .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
> + dst_ops->trampoline = FTRACE_GRAPH_TRAMP_ADDR;
> /* trampoline_size is only needed for dynamically allocated tramps */
> #endif
> - ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
> -};
> +
> +#ifdef CONFIG_DYNAMIC_FTRACE
> + if (src_ops) {
> + dst_ops->func_hash = &src_ops->local_hash;
> + mutex_init(&dst_ops->local_hash.regex_lock);
> + dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED;
> + }
> +#endif
> +}
>
> void ftrace_graph_sleep_time_control(bool enable)
> {
> @@ -871,11 +869,20 @@ static int start_graph_tracing(void)
>
> int register_ftrace_graph(struct fgraph_ops *gops)
> {
> + int command = 0;
> int ret = 0;
> int i;
>
> mutex_lock(&ftrace_lock);
>
> + if (!gops->ops.func) {
> + gops->ops.flags |= FTRACE_OPS_FL_STUB;
Ditto.
Thanks,
--
Masami Hiramatsu (Google) <mhiramat@...nel.org>
Powered by blists - more mailing lists