[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <1466596453-16345-1-git-send-email-chuhu@redhat.com>
Date: Wed, 22 Jun 2016 19:54:13 +0800
From: Chunyu Hu <chuhu@...hat.com>
To: rostedt@...dmis.org
Cc: linux-kernel@...r.kernel.org
Subject: [PATCH v2] tracing: Fix oops caused by graph notrace filter
The wakeup tracer can use function_graph tracing when the display_graph
trace option is set up by the user via tracefs, bypassing set_graph_function
and set_graph_notrace. But the bypass of set_graph_notrace is not clean.
Although wakeup_graph_entry does most of the bypass, and both the entry
and exit events will be submitted to the trace ring buffer, the ret_stack
index, which will be assigned to the depth field of the graph entry event, is not
handled. The issue is that the depth is used as the array index of
fgraph_cpu_data and can cause an oops when it's negative. The irqsoff tracer
has the same issue. To reproduce the oops:
echo 1 > options/display_graph
echo schedule > set_graph_notrace
echo wakeup > current_tracer
cat trace
cat trace
The proposed fix is to make ftrace_graph_notrace_addr always return
false when a tracer needs to bypass it.
Signed-off-by: Chunyu Hu <chuhu@...hat.com>
---
kernel/trace/ftrace.c | 1 +
kernel/trace/trace.h | 4 ++++
kernel/trace/trace_functions_graph.c | 2 ++
kernel/trace/trace_irqsoff.c | 10 ++++++++++
kernel/trace/trace_sched_wakeup.c | 11 +++++++++++
5 files changed, 28 insertions(+)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a680482..bb06828 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4471,6 +4471,7 @@ static DEFINE_MUTEX(graph_lock);
int ftrace_graph_count;
int ftrace_graph_notrace_count;
+int ftrace_graph_ignore_notrace;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5167c36..b089e04 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -757,6 +757,7 @@ extern void __trace_graph_return(struct trace_array *tr,
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
extern int ftrace_graph_notrace_count;
+extern int ftrace_graph_ignore_notrace;
extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
static inline int ftrace_graph_addr(unsigned long addr)
@@ -791,6 +792,9 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
if (!ftrace_graph_notrace_count)
return 0;
+ if (unlikely(ftrace_graph_ignore_notrace))
+ return 0;
+
for (i = 0; i < ftrace_graph_notrace_count; i++) {
if (addr == ftrace_graph_notrace_funcs[i])
return 1;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 3a0244f..24d92f0 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -822,6 +822,8 @@ print_graph_entry_nested(struct trace_iterator *iter,
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
cpu_data->depth = call->depth;
+ WARN(call->depth < 0, "call->depth = %d\n", call->depth);
+
/* Save this function pointer to see if the exit matches */
if (call->depth < FTRACE_RETFUNC_DEPTH)
cpu_data->enter_funcs[call->depth] = call->func;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 03cdff8..cb6ebcb 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -58,6 +58,7 @@ irq_trace(void)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
+extern int ftrace_graph_ignore_notrace;
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
@@ -629,6 +630,11 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
ftrace_init_array_ops(tr, irqsoff_tracer_call);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* bypass function_graph notrace filter */
+ ftrace_graph_ignore_notrace = 1;
+#endif
+
/* Only toplevel instance supports graph tracing */
if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
is_graph(tr))))
@@ -650,6 +656,10 @@ static void irqsoff_tracer_reset(struct trace_array *tr)
ftrace_reset_array_ops(tr);
irqsoff_busy = false;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ ftrace_graph_ignore_notrace = 0;
+#endif
}
static void irqsoff_tracer_start(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 9d4399b..d521e43 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -38,6 +38,7 @@ static void __wakeup_reset(struct trace_array *tr);
static int save_flags;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern int ftrace_graph_ignore_notrace;
static int wakeup_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
@@ -672,6 +673,12 @@ static int __wakeup_tracer_init(struct trace_array *tr)
tr->max_latency = 0;
wakeup_trace = tr;
ftrace_init_array_ops(tr, wakeup_tracer_call);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* bypass function_graph notrace filter */
+ ftrace_graph_ignore_notrace = 1;
+#endif
+
start_wakeup_tracer(tr);
wakeup_busy = true;
@@ -721,6 +728,10 @@ static void wakeup_tracer_reset(struct trace_array *tr)
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
ftrace_reset_array_ops(tr);
wakeup_busy = false;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ ftrace_graph_ignore_notrace = 0;
+#endif
}
static void wakeup_tracer_start(struct trace_array *tr)
--
1.8.3.1
Powered by blists - more mailing lists