Message-ID: <20240602033833.192386089@goodmis.org>
Date: Sat, 01 Jun 2024 23:37:57 -0400
From: Steven Rostedt <rostedt@...dmis.org>
To: linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org
Cc: Masami Hiramatsu <mhiramat@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
Florent Revest <revest@...omium.org>,
Martin KaFai Lau <martin.lau@...ux.dev>,
bpf <bpf@...r.kernel.org>,
Sven Schnelle <svens@...ux.ibm.com>,
Alexei Starovoitov <ast@...nel.org>,
Jiri Olsa <jolsa@...nel.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Alan Maguire <alan.maguire@...cle.com>,
Peter Zijlstra <peterz@...radead.org>,
Thomas Gleixner <tglx@...utronix.de>,
Guo Ren <guoren@...nel.org>
Subject: [PATCH v2 13/27] function_graph: Add pid tracing back to function graph tracer
From: "Steven Rostedt (Google)" <rostedt@...dmis.org>

Now that the function_graph tracer has a main callback that handles
function graph subops tracing, it no longer honors ftrace's pid
filtering. Add that logic back in the function_graph code: update the
gops entry callback to test whether it should trace the current task.

Signed-off-by: Steven Rostedt (Google) <rostedt@...dmis.org>
---
 include/linux/ftrace.h         |  2 ++
 kernel/trace/fgraph.c          | 40 ++++++++++++++++++++++++++++++++++
 kernel/trace/ftrace.c          |  5 +++--
 kernel/trace/ftrace_internal.h |  2 ++
 4 files changed, 47 insertions(+), 2 deletions(-)

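For context, the pid filtering restored here is driven from tracefs:
writing a pid to set_ftrace_pid limits tracing to that task, and the
function_graph tracer should honor it just like the function tracer
does. A minimal userspace sketch of exercising that behavior, assuming
tracefs is mounted at /sys/kernel/tracing (error handling trimmed; not
part of the patch):

	/* Sketch: limit function_graph tracing to the current process. */
	#include <stdio.h>
	#include <unistd.h>

	static void write_str(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return;
		fputs(val, f);
		fclose(f);
	}

	int main(void)
	{
		char pid[32];

		snprintf(pid, sizeof(pid), "%d", getpid());
		write_str("/sys/kernel/tracing/set_ftrace_pid", pid);
		write_str("/sys/kernel/tracing/current_tracer", "function_graph");
		/* Without this patch, function_graph ignores the pid filter. */
		return 0;
	}
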
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 8f865689e868..e31ec8516de1 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1040,6 +1040,7 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *,
 				       struct fgraph_ops *); /* entry */

 extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);
+bool ftrace_pids_enabled(struct ftrace_ops *ops);

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER

@@ -1048,6 +1049,7 @@ struct fgraph_ops {
 	trace_func_graph_ret_t retfunc;
 	struct ftrace_ops ops; /* for the hash lists */
 	void *private;
+	trace_func_graph_ent_t saved_func;
 	int idx;
 };

diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 3ef6db53c0bf..30bed20c655f 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -854,6 +854,41 @@ void ftrace_graph_exit_task(struct task_struct *t)
 	kfree(ret_stack);
 }

+static int fgraph_pid_func(struct ftrace_graph_ent *trace,
+			   struct fgraph_ops *gops)
+{
+	struct trace_array *tr = gops->ops.private;
+	int pid;
+
+	if (tr) {
+		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
+		if (pid == FTRACE_PID_IGNORE)
+			return 0;
+		if (pid != FTRACE_PID_TRACE &&
+		    pid != current->pid)
+			return 0;
+	}
+
+	return gops->saved_func(trace, gops);
+}
+
+void fgraph_update_pid_func(void)
+{
+	struct fgraph_ops *gops;
+	struct ftrace_ops *op;
+
+	if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
+		return;
+
+	list_for_each_entry(op, &graph_ops.subop_list, list) {
+		if (op->flags & FTRACE_OPS_FL_PID) {
+			gops = container_of(op, struct fgraph_ops, ops);
+			gops->entryfunc = ftrace_pids_enabled(op) ?
+				fgraph_pid_func : gops->saved_func;
+		}
+	}
+}
+
 /* Allocate a return stack for each task */
 static int start_graph_tracing(void)
 {
@@ -931,11 +966,15 @@ int register_ftrace_graph(struct fgraph_ops *gops)
 		command = FTRACE_START_FUNC_RET;
 	}

+	/* Always save the function, and reset at unregistering */
+	gops->saved_func = gops->entryfunc;
+
 	ret = ftrace_startup_subops(&graph_ops, &gops->ops, command);
error:
	if (ret) {
 		fgraph_array[i] = &fgraph_stub;
 		ftrace_graph_active--;
+		gops->saved_func = NULL;
 	}
out:
 	mutex_unlock(&ftrace_lock);
@@ -979,5 +1018,6 @@ void unregister_ftrace_graph(struct fgraph_ops *gops)
 		unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 	}
out:
+	gops->saved_func = NULL;
 	mutex_unlock(&ftrace_lock);
 }
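
The two additions above follow a save-and-swap pattern:
register_ftrace_graph() stashes the caller's entryfunc in saved_func,
and fgraph_update_pid_func() flips the live pointer between the
pid-filtering wrapper and the saved callback. Because the walk is over
the embedded ftrace_ops, container_of() recovers the enclosing
fgraph_ops. A standalone userspace model of that pattern, with
simplified stand-in types (illustrative only, not the kernel's API):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct gops;
	typedef int (*entry_t)(struct gops *g, int pid);

	struct ops { int pid_flag; };

	struct gops {
		entry_t entryfunc;	/* live callback */
		entry_t saved_func;	/* as registered by the user */
		struct ops ops;		/* embedded, as fgraph_ops embeds ftrace_ops */
	};

	static int user_entry(struct gops *g, int pid)
	{
		printf("enter: pid %d\n", pid);
		return 1;
	}

	static int filter_pid = 42;

	/* In the style of fgraph_pid_func(): filter, then chain on. */
	static int pid_entry(struct gops *g, int pid)
	{
		if (pid != filter_pid)
			return 0;
		return g->saved_func(g, pid);
	}

	/* In the style of fgraph_update_pid_func(): given only the embedded
	 * ops, recover the enclosing gops and swap the live callback. */
	static void update_pid_func(struct ops *op, int pids_enabled)
	{
		struct gops *g = container_of(op, struct gops, ops);

		if (op->pid_flag)
			g->entryfunc = pids_enabled ? pid_entry : g->saved_func;
	}

	int main(void)
	{
		struct gops g = { .entryfunc = user_entry,
				  .ops = { .pid_flag = 1 } };

		g.saved_func = g.entryfunc;	/* as register_ftrace_graph() does */
		update_pid_func(&g.ops, 1);	/* enable filtering */
		g.entryfunc(&g, 42);		/* traced */
		g.entryfunc(&g, 7);		/* filtered out */
		update_pid_func(&g.ops, 0);	/* restore saved_func */
		g.entryfunc(&g, 7);		/* traced again */
		return 0;
	}
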
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3615614bd116..41fabc6d30e4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -100,7 +100,7 @@ struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 /* What to set function_trace_op to */
 static struct ftrace_ops *set_function_trace_op;

-static bool ftrace_pids_enabled(struct ftrace_ops *ops)
+bool ftrace_pids_enabled(struct ftrace_ops *ops)
 {
 	struct trace_array *tr;

@@ -402,10 +402,11 @@ static void ftrace_update_pid_func(void)
 		if (op->flags & FTRACE_OPS_FL_PID) {
 			op->func = ftrace_pids_enabled(op) ?
 				ftrace_pid_func : op->saved_func;
-			ftrace_update_trampoline(op);
 		}
 	} while_for_each_ftrace_op(op);

+	fgraph_update_pid_func();
+
 	update_ftrace_function();
 }
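
Why the explicit call is needed: the while_for_each_ftrace_op() loop
above walks only the top-level ftrace_ops list, while fgraph users are
registered as subops under the single graph_ops manager and so are
invisible to that loop. A toy model of the manager/sub-list split,
with simplified types (illustrative only):

	#include <stdbool.h>
	#include <stdio.h>

	struct op { const char *name; bool pid; };

	struct manager {
		struct op self;		/* on the global ops list */
		struct op *subops[8];	/* not on the global list */
		int nr;
	};

	static void update_global(struct op **list, int n)
	{
		for (int i = 0; i < n; i++)	/* like the loop above */
			if (list[i]->pid)
				printf("toggle %s\n", list[i]->name);
	}

	static void update_subops(struct manager *m)
	{
		for (int i = 0; i < m->nr; i++)	/* like fgraph_update_pid_func() */
			if (m->subops[i]->pid)
				printf("toggle subop %s\n", m->subops[i]->name);
	}

	int main(void)
	{
		struct op tracer = { "fgraph user", true };
		struct manager graph = { { "graph_ops", false }, { &tracer }, 1 };
		struct op *global[] = { &graph.self };

		update_global(global, 1);	/* never sees "fgraph user" */
		update_subops(&graph);		/* hence the explicit call */
		return 0;
	}
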
diff --git a/kernel/trace/ftrace_internal.h b/kernel/trace/ftrace_internal.h
index cdfd12c44ab4..bfba10c2fcf1 100644
--- a/kernel/trace/ftrace_internal.h
+++ b/kernel/trace/ftrace_internal.h
@@ -43,8 +43,10 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern int ftrace_graph_active;
+extern void fgraph_update_pid_func(void);
 #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
 # define ftrace_graph_active 0
+static inline void fgraph_update_pid_func(void) {}
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */

 #else /* !CONFIG_FUNCTION_TRACER */
--
2.43.0