Message-Id: <fdaaa977c23df7bd784665e6646b327f50e3a82e.1433354807.git.tom.zanussi@linux.intel.com>
Date:	Wed,  3 Jun 2015 13:30:40 -0500
From:	Tom Zanussi <tom.zanussi@...ux.intel.com>
To:	rostedt@...dmis.org
Cc:	daniel.wagner@...-carit.de, masami.hiramatsu.pt@...achi.com,
	namhyung@...nel.org, josh@...htriplett.org, andi@...stfloor.org,
	linux-kernel@...r.kernel.org,
	Tom Zanussi <tom.zanussi@...ux.intel.com>
Subject: [PATCH v2 10/10][RFC] ftrace: Add function_hist tracer

Add a new tracer named function_hist:

  # cat /sys/kernel/debug/tracing/available_tracers

  blk mmiotrace function_hist function_graph wakeup_dl wakeup_rt wakeup
  irqsoff function nop

  # echo function_hist > /sys/kernel/debug/tracing/current_tracer
  # cat /sys/kernel/debug/tracing/function_hist

This is essentially the same as the kernel function tracer, but
instead of logging every function call into the trace buffer, it
continually updates a tracing map that keeps a running hit count
for each function.

As such, it provides an exhaustive enumeration of every traceable
function called in the kernel since the tracer was enabled, along with
the number of times it was called.
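
Concretely, the hot path executed on every traced call condenses to
the following (lifted from function_hist_call() in the patch below;
the recursion guard and preempt handling are omitted here):

	key = (void *)&ip;
	elt = tracing_map_insert(function_hist_data->map, key);
	if (elt)
		tracing_map_update_sum(elt, HITCOUNT_IDX, 1);
	else
		atomic64_inc(&function_hist_data->drops);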

Example output from an overnight run:

  # cat /sys/kernel/debug/tracing/function_hist

ip: [ffffffff812002d0] inotify_release                     hitcount:          1
ip: [ffffffff81078c40] param_attr_show                     hitcount:          1
ip: [ffffffff816e2f20] inet6_csk_bind_conflict             hitcount:          1
ip: [ffffffffa04bdf50] intel_modeset_preclose              hitcount:          1
ip: [ffffffff812001f0] inotify_free_group_priv             hitcount:          1
ip: [ffffffffa04641d0] i915_gem_context_create_ioctl       hitcount:          1
ip: [ffffffffa026b100] drm_modeset_lock_all_crtcs          hitcount:          1
ip: [ffffffff811fe410] fsnotify_destroy_group              hitcount:          1
ip: [ffffffff810785e0] module_attr_show                    hitcount:          1
 .
 .
 .
ip: [ffffffff8109a3e0] add_wait_queue                      hitcount:  235927661
ip: [ffffffff81716930] _raw_spin_lock                      hitcount:  345398436
ip: [ffffffff81713150] _cond_resched                       hitcount:  378571372
ip: [ffffffff816abe90] unix_poll                           hitcount:  413026161
ip: [ffffffff815f4510] sock_poll                           hitcount:  438658385
ip: [ffffffff811dc040] __fdget                             hitcount:  640844197
ip: [ffffffff811dba90] __fget                              hitcount:  657582124
ip: [ffffffff811dbfd0] __fget_light                        hitcount:  662386005
ip: [ffffffff817164f0] _raw_spin_unlock_irqrestore         hitcount:  848890816
ip: [ffffffff817168e0] _raw_spin_lock_irqsave              hitcount:  849815447
ip: [ffffffff811c09c0] fput                                hitcount:  892233261

Totals:
    Hits: 17981015194
    Entries: 5527
    Dropped: 0
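
The entries are sorted by ascending hitcount (see create_sort_key()
below), so the hottest functions appear at the end of the output.
Re-selecting the tracer tears down and recreates the map, which
zeroes all counts, e.g.:

  # tail -10 /sys/kernel/debug/tracing/function_hist
  # echo nop > /sys/kernel/debug/tracing/current_tracer
  # echo function_hist > /sys/kernel/debug/tracing/current_tracer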

Signed-off-by: Tom Zanussi <tom.zanussi@...ux.intel.com>
---
 kernel/trace/Makefile               |   1 +
 kernel/trace/trace.c                |   3 +
 kernel/trace/trace.h                |  11 ++
 kernel/trace/trace_functions_hist.c | 345 ++++++++++++++++++++++++++++++++++++
 4 files changed, 360 insertions(+)
 create mode 100644 kernel/trace/trace_functions_hist.c

diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 7faace3..eb2bb50 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_TRACING) += tracing_map.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
+obj-$(CONFIG_FUNCTION_TRACER) += trace_functions_hist.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 147128f..aa1a4b5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6739,6 +6739,9 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 	if (ftrace_create_function_files(tr, d_tracer))
 		WARN(1, "Could not allocate function filter files");
 
+	if (ftrace_create_function_hist_files(tr, d_tracer))
+		WARN(1, "Could not allocate function_hist files");
+
 #ifdef CONFIG_TRACER_SNAPSHOT
 	trace_create_file("snapshot", 0644, d_tracer,
 			  tr, &snapshot_fops);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5e2e3b0..502f88b 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -820,7 +820,10 @@ static inline int ftrace_trace_task(struct task_struct *task)
 extern int ftrace_is_dead(void);
 int ftrace_create_function_files(struct trace_array *tr,
 				 struct dentry *parent);
+int ftrace_create_function_hist_files(struct trace_array *tr,
+				      struct dentry *parent);
 void ftrace_destroy_function_files(struct trace_array *tr);
+void ftrace_destroy_function_hist_files(struct trace_array *tr);
 void ftrace_init_global_array_ops(struct trace_array *tr);
 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
 void ftrace_reset_array_ops(struct trace_array *tr);
@@ -837,7 +840,15 @@ ftrace_create_function_files(struct trace_array *tr,
 {
 	return 0;
 }
+static inline int
+ftrace_create_function_hist_files(struct trace_array *tr,
+				  struct dentry *parent)
+{
+	return 0;
+}
 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
+static inline void
+ftrace_destroy_function_hist_files(struct trace_array *tr) { }
 static inline __init void
 ftrace_init_global_array_ops(struct trace_array *tr) { }
 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
diff --git a/kernel/trace/trace_functions_hist.c b/kernel/trace/trace_functions_hist.c
new file mode 100644
index 0000000..136ca76
--- /dev/null
+++ b/kernel/trace/trace_functions_hist.c
@@ -0,0 +1,345 @@
+/*
+ * histogram-based function tracer
+ *
+ * Copyright (C) 2015 Tom Zanussi <tom.zanussi@...ux.intel.com>
+ *
+ * Based on code from the function_tracer, that is:
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@...hat.com>
+ * Copyright (C) 2008 Ingo Molnar <mingo@...hat.com>
+ */
+#include <linux/kallsyms.h>
+#include <linux/module.h>
+#include <linux/ftrace.h>
+#include <linux/slab.h>
+
+#include "tracing_map.h"
+#include "trace.h"
+
+static void tracing_start_function_hist(struct trace_array *tr);
+static void tracing_stop_function_hist(struct trace_array *tr);
+static void
+function_hist_call(unsigned long ip, unsigned long parent_ip,
+		   struct ftrace_ops *op, struct pt_regs *pt_regs);
+
+enum hist_key_flags {
+	HIST_FIELD_SYM		= 1,
+	HIST_FIELD_HEX		= 2,
+};
+
+struct function_hist_data {
+	atomic64_t			total_hits;
+	atomic64_t			drops;
+	unsigned long			key_flags;
+	struct tracing_map_sort_key	sort_key;
+	struct tracing_map		*map;
+};
+
+#define FUNCTION_HIST_BITS_DEFAULT	16
+#define HITCOUNT_IDX			0
+
+static struct function_hist_data *function_hist_data;
+
+static int allocate_ftrace_ops(struct trace_array *tr)
+{
+	struct ftrace_ops *ops;
+
+	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+	if (!ops)
+		return -ENOMEM;
+
+	ops->func = function_hist_call;
+	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
+
+	tr->ops = ops;
+	ops->private = tr;
+
+	return 0;
+}
+
+static int function_hist_init(struct trace_array *tr)
+{
+	ftrace_func_t func;
+
+	/*
+	 * Instance trace_arrays get their ops allocated
+	 * at instance creation, unless that allocation
+	 * failed.
+	 */
+	if (!tr->ops)
+		return -ENOMEM;
+
+	func = function_hist_call;
+
+	ftrace_init_array_ops(tr, func);
+
+	tracing_start_function_hist(tr);
+
+	return 0;
+}
+
+static void function_hist_reset(struct trace_array *tr)
+{
+	tracing_stop_function_hist(tr);
+	ftrace_reset_array_ops(tr);
+}
+
+static void
+function_hist_call(unsigned long ip, unsigned long parent_ip,
+		   struct ftrace_ops *op, struct pt_regs *pt_regs)
+{
+	struct trace_array *tr = op->private;
+	struct tracing_map_elt *elt;
+	void *key;
+	int bit;
+
+	if (unlikely(!tr->function_enabled))
+		return;
+
+	preempt_disable_notrace();
+
+	bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
+					   TRACE_FTRACE_MAX);
+	if (bit < 0)
+		goto out;
+
+	if (atomic64_read(&function_hist_data->drops)) {
+		atomic64_inc(&function_hist_data->drops);
+		goto out_clear;
+	}
+
+	key = (void *)&ip;
+	elt = tracing_map_insert(function_hist_data->map, key);
+	if (elt)
+		tracing_map_update_sum(elt, HITCOUNT_IDX, 1);
+	else
+		atomic64_inc(&function_hist_data->drops);
+
+	atomic64_inc(&function_hist_data->total_hits);
+ out_clear:
+	trace_clear_recursion(bit);
+ out:
+	preempt_enable_notrace();
+}
+
+static void destroy_hist_data(struct function_hist_data *hist_data)
+{
+	tracing_map_destroy(hist_data->map);
+	kfree(hist_data);
+}
+
+static inline void create_sort_key(struct function_hist_data *hist_data)
+{
+	hist_data->sort_key.field_idx = HITCOUNT_IDX;
+	hist_data->sort_key.descending = false;
+}
+
+static int create_tracing_map_fields(struct function_hist_data *hist_data)
+{
+	struct tracing_map *map = hist_data->map;
+	int idx;
+
+	idx = tracing_map_add_sum_field(map);
+	if (idx < 0)
+		return idx;
+
+	idx = tracing_map_add_key_field(map, 0, tracing_map_cmp_none);
+	if (idx < 0)
+		return idx;
+
+	return 0;
+}
+
+static struct function_hist_data *create_hist_data(unsigned int map_bits)
+{
+	struct function_hist_data *hist_data;
+	unsigned int key_size;
+	int ret = 0;
+
+	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
+	if (!hist_data)
+		return NULL;
+
+	create_sort_key(hist_data);
+
+	key_size = sizeof(unsigned long); /* ip */
+	hist_data->map = tracing_map_create(map_bits, key_size,
+					    NULL, hist_data);
+	if (IS_ERR(hist_data->map)) {
+		ret = PTR_ERR(hist_data->map);
+		hist_data->map = NULL;
+		goto free;
+	}
+
+	ret = create_tracing_map_fields(hist_data);
+	if (ret)
+		goto free;
+
+	ret = tracing_map_init(hist_data->map);
+	if (ret)
+		goto free;
+ out:
+	return hist_data;
+ free:
+	destroy_hist_data(hist_data);
+	if (ret)
+		hist_data = ERR_PTR(ret);
+	else
+		hist_data = NULL;
+
+	goto out;
+}
+
+static void tracing_start_function_hist(struct trace_array *tr)
+{
+	unsigned int hist_trigger_bits = FUNCTION_HIST_BITS_DEFAULT;
+	struct function_hist_data *hist_data;
+
+	if (function_hist_data) {
+		destroy_hist_data(function_hist_data);
+		function_hist_data = NULL;
+	}
+
+	hist_data = create_hist_data(hist_trigger_bits);
+	if (IS_ERR(hist_data))
+		return;
+
+	hist_data->key_flags |= HIST_FIELD_SYM;
+
+	function_hist_data = hist_data;
+
+	tr->function_enabled = 0;
+	register_ftrace_function(tr->ops);
+	tr->function_enabled = 1;
+}
+
+static void tracing_stop_function_hist(struct trace_array *tr)
+{
+	tr->function_enabled = 0;
+	unregister_ftrace_function(tr->ops);
+}
+
+static void function_hist_entry_print(struct seq_file *m,
+				      struct function_hist_data *hist_data,
+				      void *key, struct tracing_map_elt *elt)
+{
+	char str[KSYM_SYMBOL_LEN];
+	u64 uval;
+
+	if (hist_data->key_flags & HIST_FIELD_SYM) {
+		uval = *(unsigned long *)key;
+		kallsyms_lookup(uval, NULL, NULL, NULL, str);
+		seq_printf(m, "ip: [%llx] %-35s", uval, str);
+	} else if (hist_data->key_flags & HIST_FIELD_HEX) {
+		uval = *(unsigned long *)key;
+		seq_printf(m, "ip: %llx", uval);
+	} else {
+		uval = *(unsigned long *)key;
+		seq_printf(m, "ip: %10llu", uval);
+	}
+
+	seq_printf(m, " hitcount: %10llu",
+		   tracing_map_read_sum(elt, HITCOUNT_IDX));
+
+	seq_puts(m, "\n");
+}
+
+static int print_entries(struct seq_file *m,
+			 struct function_hist_data *hist_data)
+{
+	struct tracing_map_sort_entry **sort_entries = NULL;
+	struct tracing_map *map = hist_data->map;
+	int i, n_entries;
+
+	n_entries = tracing_map_sort_entries(map, &hist_data->sort_key, 1,
+					     &sort_entries);
+	if (n_entries < 0)
+		return n_entries;
+
+	for (i = 0; i < n_entries; i++)
+		function_hist_entry_print(m, hist_data,
+					  sort_entries[i]->key,
+					  sort_entries[i]->elt);
+
+	tracing_map_destroy_sort_entries(sort_entries, n_entries);
+
+	return n_entries;
+}
+
+static int hist_show(struct seq_file *m, void *v)
+{
+	int n_entries, ret = 0;
+
+	mutex_lock(&trace_types_lock);
+
+	if (!function_hist_data)
+		goto out_unlock;
+
+	n_entries = print_entries(m, function_hist_data);
+	if (n_entries < 0) {
+		ret = n_entries;
+		n_entries = 0;
+	}
+
+	seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %d\n    Dropped: %llu\n",
+		   (u64)atomic64_read(&function_hist_data->total_hits),
+		   n_entries, (u64)atomic64_read(&function_hist_data->drops));
+ out_unlock:
+	mutex_unlock(&trace_types_lock);
+
+	return ret;
+}
+
+static int function_hist_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hist_show, file);
+}
+
+static const struct file_operations function_hist_fops = {
+	.open = function_hist_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+int ftrace_create_function_hist_files(struct trace_array *tr,
+				      struct dentry *parent)
+{
+	struct dentry *d_tracer;
+	int ret;
+
+	d_tracer = tracing_init_dentry();
+	if (IS_ERR(d_tracer))
+		return 0;
+
+	trace_create_file("function_hist", 0444, d_tracer,
+			NULL, &function_hist_fops);
+
+	ret = allocate_ftrace_ops(tr);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void ftrace_destroy_function_hist_files(struct trace_array *tr)
+{
+	kfree(tr->ops);
+	tr->ops = NULL;
+}
+
+static struct tracer function_hist __tracer_data = {
+	.name		= "function_hist",
+	.init		= function_hist_init,
+	.reset		= function_hist_reset,
+	.allow_instances = true,
+#ifdef CONFIG_FTRACE_SELFTEST
+	.selftest	= trace_selftest_startup_function,
+#endif
+};
+
+static __init int init_function_hist(void)
+{
+	return register_tracer(&function_hist);
+}
+fs_initcall(init_function_hist);
-- 
1.9.3
