Date:	Wed, 10 Feb 2010 14:45:53 -0800
From:	Tim Bird <tim.bird@...sony.com>
To:	linux kernel <linux-kernel@...r.kernel.org>,
	Steven Rostedt <srostedt@...hat.com>,
	Frederic Weisbecker <fweisbec@...il.com>
Subject: [PATCH 1/1] ftrace - add support for tracing_thresh to function_graph
 tracer

This adds support for 'tracing_thresh' to the existing function_graph
tracer.  When 'tracing_thresh' is in effect, function entry events are
not stored; only exits of functions whose duration exceeds the threshold
are stored.  This keeps functions that fall below the threshold from
filling up the log.  An extra option is added to allow showing the
function name with the exit event.  (Otherwise, all you get are closing
braces.)
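
For reference, here is a minimal user-space sketch (not part of the
patch) of how the feature could be exercised.  It assumes debugfs is
mounted at /sys/kernel/debug and that the threshold value is taken in
microseconds by the existing tracing_max_lat file operations that the
patch reuses for the tracing_thresh file:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
}

int main(void)
{
	/* report only functions that run longer than 100 microseconds */
	write_str("/sys/kernel/debug/tracing/tracing_thresh", "100");

	/* select the function_graph tracer */
	write_str("/sys/kernel/debug/tracing/current_tracer", "function_graph");

	return 0;
}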

Note that the 'funcgraph-exit' display option is valuable by itself,
since it makes the output much more 'grep-able'.
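
Continuing the sketch above (write_str() as defined there), the new
option would presumably be toggled through trace_options once the patch
is applied, so that exit records read "func_name();" rather than a bare
"}" and a particular function's exits can be grepped by name:

	/* hypothetical: turn on the exit annotation added by this patch */
	write_str("/sys/kernel/debug/tracing/trace_options", "funcgraph-exit");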

Signed-off-by: Tim Bird <tim.bird@...sony.com>
---
 trace.c                 |    5 +++--
 trace_functions_graph.c |   20 +++++++++++++++++++-
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0df1b0f..aa39f8e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -502,9 +502,10 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 static arch_spinlock_t ftrace_max_lock =
     (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
+unsigned long __read_mostly    tracing_thresh;
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly    tracing_max_latency;
-unsigned long __read_mostly    tracing_thresh;
 
 /*
  * Copy the new maximum trace into the separate maximum-trace
@@ -4176,10 +4177,10 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_TRACER_MAX_TRACE
     trace_create_file("tracing_max_latency", 0644, d_tracer,
             &tracing_max_latency, &tracing_max_lat_fops);
+#endif
 
     trace_create_file("tracing_thresh", 0644, d_tracer,
             &tracing_thresh, &tracing_max_lat_fops);
-#endif
 
     trace_create_file("README", 0444, d_tracer,
             NULL, &tracing_readme_fops);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b1342c5..e019e32 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -39,6 +39,7 @@ struct fgraph_data {
 #define TRACE_GRAPH_PRINT_PROC        0x8
 #define TRACE_GRAPH_PRINT_DURATION    0x10
 #define TRACE_GRAPH_PRINT_ABS_TIME    0X20
+#define TRACE_GRAPH_PRINT_FUNC_EXIT    0X40
 
 static struct tracer_opt trace_opts[] = {
     /* Display overruns? (for self-debug purpose) */
@@ -53,6 +54,8 @@ static struct tracer_opt trace_opts[] = {
     { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
     /* Display absolute time of an entry */
     { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
+    /* Display function name on exit, instead of just closing brace */
+    { TRACER_OPT(funcgraph-exit, TRACE_GRAPH_PRINT_FUNC_EXIT) },
     { } /* Empty entry */
 };
 
@@ -202,6 +205,8 @@ static int __trace_graph_entry(struct trace_array *tr,
     return 1;
 }
 
+extern unsigned long __read_mostly tracing_thresh;
+
 int trace_graph_entry(struct ftrace_graph_ent *trace)
 {
     struct trace_array *tr = graph_array;
@@ -221,6 +226,10 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
     if (!ftrace_graph_addr(trace->func))
         return 0;
 
+    /* when tracing_thresh is set, only save function exits (omit function entries from the log) */
+    if (tracing_thresh)
+        return 1;
+
     local_irq_save(flags);
     cpu = raw_smp_processor_id();
     data = tr->data[cpu];
@@ -254,6 +263,10 @@ static void __trace_graph_return(struct trace_array *tr,
     if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
         return;
 
+    if (tracing_thresh &&
+        (trace->rettime - trace->calltime < tracing_thresh))
+        return;
+
     event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                       sizeof(*entry), flags, pc);
     if (!event)
@@ -891,7 +904,12 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
             return TRACE_TYPE_PARTIAL_LINE;
     }
 
-    ret = trace_seq_printf(s, "}\n");
+    if (tracer_flags.val & TRACE_GRAPH_PRINT_FUNC_EXIT) {
+        ret = trace_seq_printf(s, "%ps();\n", (void *)trace->func);
+    } else {
+        ret = trace_seq_printf(s, "}\n");
+    }
+
     if (!ret)
         return TRACE_TYPE_PARTIAL_LINE;
 


