Message-Id: <20190501203650.29548-2-viktor.rosendahl@gmail.com>
Date:   Wed,  1 May 2019 22:36:47 +0200
From:   Viktor Rosendahl <viktor.rosendahl@...il.com>
To:     Steven Rostedt <rostedt@...dmis.org>,
        Ingo Molnar <mingo@...hat.com>, linux-kernel@...r.kernel.org
Cc:     Joel Fernandes <joel@...lfernandes.org>,
        Viktor Rosendahl <viktor.rosendahl@...il.com>
Subject: [PATCH v2 1/4] ftrace: Implement fs notification for preempt/irqsoff tracers

This patch implements fsnotify notifications for the trace file,
e.g. /sys/kernel/debug/tracing/trace, so that user space is notified
through the fsnotify framework whenever a new trace is available.

This makes it possible to implement a user space program that can,
with equal probability, obtain traces of latencies that occur in
rapid succession, even though the preempt/irqsoff tracers operate
in overwrite mode.
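
For illustration only (not part of this patch), a minimal user space
watcher built on top of these events could look roughly like the
sketch below; the tracefs path, buffer size and error handling are
assumptions:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/inotify.h>

#define TRACE_FILE "/sys/kernel/debug/tracing/trace"

int main(void)
{
	char buf[4096];
	ssize_t len;
	int fd, wd;

	fd = inotify_init1(0);
	if (fd < 0) {
		perror("inotify_init1");
		return EXIT_FAILURE;
	}

	/* IN_MODIFY matches the FS_MODIFY events generated by the kernel */
	wd = inotify_add_watch(fd, TRACE_FILE, IN_MODIFY);
	if (wd < 0) {
		perror("inotify_add_watch");
		return EXIT_FAILURE;
	}

	for (;;) {
		/* Block until a new max latency trace has been written */
		len = read(fd, buf, sizeof(buf));
		if (len <= 0)
			break;
		/*
		 * Here the program would copy the trace file before the
		 * next latency overwrites it.
		 */
		printf("trace file updated (%zd bytes of events)\n", len);
	}

	return EXIT_SUCCESS;
}

Whether such a program saves the trace synchronously or hands it off
to another thread is left to the implementation.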

Signed-off-by: Viktor Rosendahl <viktor.rosendahl@...il.com>
---
 kernel/trace/Kconfig         | 10 ++++++++++
 kernel/trace/trace.c         | 31 +++++++++++++++++++++++++++++--
 kernel/trace/trace.h         |  5 +++++
 kernel/trace/trace_irqsoff.c | 35 +++++++++++++++++++++++++++++++++++
 4 files changed, 79 insertions(+), 2 deletions(-)

diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 8bd1d6d001d7..35e5fd3224f6 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -234,6 +234,16 @@ config PREEMPT_TRACER
 	  enabled. This option and the irqs-off timing option can be
 	  used together or separately.)
 
+config PREEMPTIRQ_FSNOTIFY
+	bool "Generate fsnotify events for the latency tracers"
+	default n
+	depends on (IRQSOFF_TRACER || PREEMPT_TRACER) && FSNOTIFY
+	help
+	  This option will enable the generation of fsnotify events for the
+	  trace file. This makes it possible for userspace to be notified about
+	  modification of /sys/kernel/debug/tracing/trace through the inotify
+	  interface.
+
 config SCHED_TRACER
 	bool "Scheduling Latency Tracer"
 	select GENERIC_TRACER
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ca1ee656d6d8..ebefb8d4e072 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -44,6 +44,8 @@
 #include <linux/trace.h>
 #include <linux/sched/clock.h>
 #include <linux/sched/rt.h>
+#include <linux/fsnotify.h>
+#include <linux/workqueue.h>
 
 #include "trace.h"
 #include "trace_output.h"
@@ -8191,6 +8193,32 @@ static __init void create_trace_instances(struct dentry *d_tracer)
 		return;
 }
 
+#ifdef CONFIG_PREEMPTIRQ_FSNOTIFY
+
+static void trace_notify_workfn(struct work_struct *work)
+{
+	struct trace_array *tr = container_of(work, struct trace_array,
+					      notify_work);
+	fsnotify(tr->d_trace->d_inode, FS_MODIFY, tr->d_trace->d_inode,
+		 FSNOTIFY_EVENT_INODE, NULL, 0);
+}
+
+static void trace_create_trace_file(struct trace_array *tr,
+				    struct dentry *d_tracer)
+{
+	/* For notification we need to init the work structure and save the dentry */
+	INIT_WORK(&tr->notify_work, trace_notify_workfn);
+	tr->d_trace = trace_create_file("trace", 0644, d_tracer, tr,
+					&tracing_fops);
+}
+
+#else /* !CONFIG_PREEMPTIRQ_FSNOTIFY */
+
+#define trace_create_trace_file(tr, d_tracer) \
+	trace_create_file("trace", 0644, d_tracer, tr, &tracing_fops)
+
+#endif
+
 static void
 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 {
@@ -8209,8 +8237,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 	trace_create_file("trace_options", 0644, d_tracer,
 			  tr, &tracing_iter_fops);
 
-	trace_create_file("trace", 0644, d_tracer,
-			  tr, &tracing_fops);
+	trace_create_trace_file(tr, d_tracer);
 
 	trace_create_file("trace_pipe", 0444, d_tracer,
 			  tr, &tracing_pipe_fops);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d80cee49e0eb..59dc01ac52fd 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -17,6 +17,7 @@
 #include <linux/compiler.h>
 #include <linux/trace_seq.h>
 #include <linux/glob.h>
+#include <linux/workqueue.h>
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 #include <asm/unistd.h>		/* For NR_SYSCALLS	     */
@@ -302,6 +303,10 @@ struct trace_array {
 	struct dentry		*options;
 	struct dentry		*percpu_dir;
 	struct dentry		*event_dir;
+#ifdef CONFIG_PREEMPTIRQ_FSNOTIFY
+	struct dentry		*d_trace;
+	struct work_struct	notify_work;
+#endif
 	struct trace_options	*topts;
 	struct list_head	systems;
 	struct list_head	events;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index a745b0cee5d3..07a391e845de 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -82,6 +82,31 @@ static inline int irqsoff_display_graph(struct trace_array *tr, int set)
  */
 static __cacheline_aligned_in_smp	unsigned long max_sequence;
 
+#ifdef CONFIG_PREEMPTIRQ_FSNOTIFY
+
+static struct workqueue_struct *notify_wq;
+
+static __init void trace_file_notify_init(void)
+{
+	notify_wq = alloc_workqueue("irqsoff_notify_wq",
+				    WQ_UNBOUND | WQ_HIGHPRI, 0);
+	if (!notify_wq)
+		pr_err("Unable to allocate irqsoff_notify_wq\n");
+}
+
+static inline void trace_file_notify(struct trace_array *tr)
+{
+	if (likely(notify_wq))
+		queue_work(notify_wq, &tr->notify_work);
+}
+
+#else /* !CONFIG_PREEMPTIRQ_FSNOTIFY */
+
+#define trace_file_notify_init() do {} while (0)
+#define trace_file_notify(tr) do {} while (0)
+
+#endif /* !CONFIG_PREEMPTIRQ_FSNOTIFY */
+
 #ifdef CONFIG_FUNCTION_TRACER
 /*
  * Prologue for the preempt and irqs off function tracers.
@@ -323,6 +348,7 @@ check_critical_timing(struct trace_array *tr,
 	u64 T0, T1, delta;
 	unsigned long flags;
 	int pc;
+	bool notify = false;
 
 	T0 = data->preempt_timestamp;
 	T1 = ftrace_now(cpu);
@@ -353,6 +379,7 @@ check_critical_timing(struct trace_array *tr,
 	if (likely(!is_tracing_stopped())) {
 		tr->max_latency = delta;
 		update_max_tr_single(tr, current, cpu);
+		notify = true;
 	}
 
 	max_sequence++;
@@ -364,6 +391,13 @@ check_critical_timing(struct trace_array *tr,
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
 	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
+
+	/*
+	 * We are optimizing for a high threshold, so this notification
+	 * will seldom happen.
+	 */
+	if (unlikely(notify))
+		trace_file_notify(tr);
 }
 
 static nokprobe_inline void
@@ -745,6 +779,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 
 __init static int init_irqsoff_tracer(void)
 {
+	trace_file_notify_init();
 #ifdef CONFIG_IRQSOFF_TRACER
 	register_tracer(&irqsoff_tracer);
 #endif
-- 
2.17.1
