Message-Id: <1466506666-28804-1-git-send-email-chuhu@redhat.com>
Date: Tue, 21 Jun 2016 18:57:46 +0800
From: Chunyu Hu <chuhu@...hat.com>
To: rostedt@...dmis.org
Cc: linux-kernel@...r.kernel.org
Subject: [PATCH] tracing: Fix oops caused by graph notrace filter
The wakeup tracer can use function_graph tracing when the user sets the
display-graph trace option via tracefs, and it is supposed to bypass
both the set_graph_function and set_graph_notrace filters. But the
bypass of set_graph_notrace is not clean. Although wakeup_graph_entry()
does most of the bypass, so that both the entry and exit events are
submitted to the trace ring buffer, the ret_stack index, which is
assigned to the depth field of the graph entry event, is not handled.
The problem is that this depth is used as an array index into
fgraph_cpu_data and causes an oops when it is negative (see the sketch
after the reproducer below). The irqsoff tracer has the same issue.
To see the oops:
  echo 1 > options/display-graph
  echo schedule > set_graph_notrace
  echo wakeup > current_tracer
  cat trace
  cat trace
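The indexing that goes wrong is visible in print_graph_entry_nested()
(the same lines the trace_functions_graph.c hunk below touches); a
simplified sketch:

	cpu_data = per_cpu_ptr(data->cpu_data, cpu);
	cpu_data->depth = call->depth;	/* may be negative when the entry
					 * was skipped by the notrace filter */

	/* only the upper bound is checked, so a negative depth still
	 * reaches the array:
	 */
	if (call->depth < FTRACE_RETFUNC_DEPTH)
		cpu_data->enter_funcs[call->depth] = call->func;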
Making ftrace_graph_notrace_addr() always return false while a tracer
needs to bypass the filter is the fix proposed here.
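Condensed from the hunks below, the lifecycle of the new flag is simply
(function names as in the patch; each latency tracer sets it for its
whole lifetime):

	/* in __irqsoff_tracer_init() / __wakeup_tracer_init() */
	ftrace_graph_ignore_notrace = 1;	/* bypass notrace filter */

	/* in irqsoff_tracer_reset() / wakeup_tracer_reset() */
	ftrace_graph_ignore_notrace = 0;	/* restore filtering */

A plain int with no locking should be enough here, since only one of
these latency tracers can be the current tracer at a time.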
Signed-off-by: Chunyu Hu <chuhu@...hat.com>
---
 kernel/trace/ftrace.c                | 1 +
 kernel/trace/trace.h                 | 4 ++++
 kernel/trace/trace_functions_graph.c | 2 ++
 kernel/trace/trace_irqsoff.c         | 7 +++++++
 kernel/trace/trace_sched_wakeup.c    | 8 ++++++++
 5 files changed, 22 insertions(+)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a680482..bb06828 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4471,6 +4471,7 @@ static DEFINE_MUTEX(graph_lock);
 
 int ftrace_graph_count;
 int ftrace_graph_notrace_count;
+int ftrace_graph_ignore_notrace;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
 unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5167c36..b089e04 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -757,6 +757,7 @@ extern void __trace_graph_return(struct trace_array *tr,
 extern int ftrace_graph_count;
 extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
 extern int ftrace_graph_notrace_count;
+extern int ftrace_graph_ignore_notrace;
 extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
 
 static inline int ftrace_graph_addr(unsigned long addr)
@@ -791,6 +792,9 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
 	if (!ftrace_graph_notrace_count)
 		return 0;
 
+	if (unlikely(ftrace_graph_ignore_notrace))
+		return 0;
+
 	for (i = 0; i < ftrace_graph_notrace_count; i++) {
 		if (addr == ftrace_graph_notrace_funcs[i])
 			return 1;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 3a0244f..24d92f0 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -822,6 +822,8 @@ print_graph_entry_nested(struct trace_iterator *iter,
 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 		cpu_data->depth = call->depth;
 
+		WARN(call->depth < 0, "call->depth = %d\n", call->depth);
+
 		/* Save this function pointer to see if the exit matches */
 		if (call->depth < FTRACE_RETFUNC_DEPTH)
 			cpu_data->enter_funcs[call->depth] = call->func;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 03cdff8..3d76e0f 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -32,6 +32,8 @@ static int trace_type __read_mostly;
 
 static int save_flags;
 
+extern int ftrace_graph_ignore_notrace;
+
 static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
 static int start_irqsoff_tracer(struct trace_array *tr, int graph);
 
@@ -629,6 +631,9 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
 
 	ftrace_init_array_ops(tr, irqsoff_tracer_call);
 
+	/* bypass function_graph notrace filter */
+	ftrace_graph_ignore_notrace = 1;
+
 	/* Only toplevel instance supports graph tracing */
 	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
 				      is_graph(tr))))
@@ -650,6 +655,8 @@ static void irqsoff_tracer_reset(struct trace_array *tr)
 
 	ftrace_reset_array_ops(tr);
 	irqsoff_busy = false;
+
+	ftrace_graph_ignore_notrace = 0;
 }
 
 static void irqsoff_tracer_start(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 9d4399b..81dd566 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -37,6 +37,8 @@ static void __wakeup_reset(struct trace_array *tr);
 
 static int save_flags;
 
+extern int ftrace_graph_ignore_notrace;
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int wakeup_display_graph(struct trace_array *tr, int set);
 # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
@@ -672,6 +674,10 @@ static int __wakeup_tracer_init(struct trace_array *tr)
 	tr->max_latency = 0;
 	wakeup_trace = tr;
 	ftrace_init_array_ops(tr, wakeup_tracer_call);
+
+	/* bypass function_graph notrace filter */
+	ftrace_graph_ignore_notrace = 1;
+
 	start_wakeup_tracer(tr);
 
 	wakeup_busy = true;
@@ -721,6 +727,8 @@ static void wakeup_tracer_reset(struct trace_array *tr)
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
 	ftrace_reset_array_ops(tr);
 	wakeup_busy = false;
+
+	ftrace_graph_ignore_notrace = 0;
 }
 
 static void wakeup_tracer_start(struct trace_array *tr)
--
1.8.3.1