Message-Id: <20130315195123.872190653@goodmis.org>
Date: Fri, 15 Mar 2013 15:39:45 -0400
From: Steven Rostedt <rostedt@...dmis.org>
To: linux-kernel@...r.kernel.org
Cc: Ingo Molnar <mingo@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Frederic Weisbecker <fweisbec@...il.com>,
Dan Carpenter <dan.carpenter@...cle.com>,
Tom Zanussi <tom.zanussi@...ux.intel.com>
Subject: [for-next][PATCH 10/20] tracing: Add a way to soft disable trace events
From: "Steven Rostedt (Red Hat)" <rostedt@...dmis.org>
In order to let triggers enable or disable events, we need a 'soft'
method for doing so. For example, if a function probe is added that
lets a user enable or disable events when a function is called, that
change must be done without taking locks or a mutex, and it definitely
can't sleep. But the full enabling of a tracepoint is expensive.

By adding a 'SOFT_DISABLE' flag, and converting the flags to be updated
without the protection of a mutex (using set/clear_bit()), this soft
disable flag can be used to let critical sections enable or disable
tracing of events (after the event has been placed into "SOFT_MODE").

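To make the locking model concrete: each state lives in its own bit of
a single flags word, writers flip individual bits with atomic
read-modify-write operations (set_bit()/clear_bit() in the kernel), and
the tracepoint fast path only needs a plain test of the word. Below is
a minimal userspace sketch of that pattern using C11 atomics; the
demo_* names and bit layout are illustrative stand-ins, not the kernel
API.

  #include <stdatomic.h>
  #include <stdio.h>

  /* Illustrative bit numbers (only the ones relevant here). */
  enum {
          DEMO_FL_ENABLED_BIT,
          DEMO_FL_SOFT_MODE_BIT,
          DEMO_FL_SOFT_DISABLED_BIT,
  };

  static atomic_ulong demo_flags;   /* stand-in for file->flags */

  /* Writer side: atomic read-modify-write, safe without a mutex. */
  static void demo_set_bit(int nr)   { atomic_fetch_or(&demo_flags, 1UL << nr); }
  static void demo_clear_bit(int nr) { atomic_fetch_and(&demo_flags, ~(1UL << nr)); }

  /* Reader side (the tracepoint fast path): a plain load is enough. */
  static int demo_test_bit(int nr)
  {
          return (atomic_load(&demo_flags) >> nr) & 1;
  }

  int main(void)
  {
          demo_set_bit(DEMO_FL_ENABLED_BIT);        /* event registered */
          demo_set_bit(DEMO_FL_SOFT_DISABLED_BIT);  /* ...but soft disabled */

          if (demo_test_bit(DEMO_FL_SOFT_DISABLED_BIT))
                  printf("event skipped without unregistering the tracepoint\n");

          demo_clear_bit(DEMO_FL_SOFT_DISABLED_BIT); /* "soft enable" */
          return 0;
  }
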
Some caveats though: The comm recorder (to map pids with a comm) can
not be soft disabled (yet). If you disable an event with a "soft"
disable and wait a while before reading the trace, the comm cache may
be replaced and you'll get a bunch of <...> for comms in the trace.

Reading the "enable" file for an event that is disabled will now give
you "0*", where the '*' denotes that the tracepoint is still active but
the event itself is "disabled".

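As a quick illustration of the read side, the mapping from these flag
bits to what the "enable" file reports boils down to the following.
This is a standalone condensation of the event_enable_read() change
further down, using made-up DEMO_FL_* values rather than the real
FTRACE_EVENT_FL_* definitions.

  #include <stdio.h>

  /* Illustrative flag values; the real ones live in ftrace_event.h. */
  #define DEMO_FL_ENABLED        (1 << 0)
  #define DEMO_FL_SOFT_DISABLED  (1 << 3)

  static const char *demo_enable_string(unsigned long flags)
  {
          if (flags & DEMO_FL_ENABLED) {
                  if (flags & DEMO_FL_SOFT_DISABLED)
                          return "0*\n";  /* registered, but soft disabled */
                  return "1\n";           /* fully enabled */
          }
          return "0\n";                   /* fully disabled */
  }

  int main(void)
  {
          /* An event that is registered but soft disabled reads as "0*". */
          printf("%s", demo_enable_string(DEMO_FL_ENABLED | DEMO_FL_SOFT_DISABLED));
          return 0;
  }
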
[ fixed _BIT used in & operation : thanks to Dan Carpenter and smatch ]
Cc: Dan Carpenter <dan.carpenter@...cle.com>
Cc: Tom Zanussi <tom.zanussi@...ux.intel.com>
Signed-off-by: Steven Rostedt <rostedt@...dmis.org>
---
include/linux/ftrace_event.h | 20 +++++++----
include/trace/ftrace.h | 8 +++++
kernel/trace/trace_events.c | 75 ++++++++++++++++++++++++++++++++++--------
3 files changed, 84 insertions(+), 19 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 4cb6cd8..4e28b01 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -251,16 +251,23 @@ struct ftrace_subsystem_dir;
enum {
FTRACE_EVENT_FL_ENABLED_BIT,
FTRACE_EVENT_FL_RECORDED_CMD_BIT,
+ FTRACE_EVENT_FL_SOFT_MODE_BIT,
+ FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
};
/*
* Ftrace event file flags:
* ENABLED - The event is enabled
* RECORDED_CMD - The comms should be recorded at sched_switch
+ * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED
+ * SOFT_DISABLED - When set, do not trace the event (even though its
+ * tracepoint may be enabled)
*/
enum {
FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
+ FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
+ FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
};
struct ftrace_event_file {
@@ -274,17 +281,18 @@ struct ftrace_event_file {
* 32 bit flags:
* bit 0: enabled
* bit 1: enabled cmd record
+ * bit 2: enable/disable with the soft disable bit
+ * bit 3: soft disabled
*
- * Changes to flags must hold the event_mutex.
- *
- * Note: Reads of flags do not hold the event_mutex since
- * they occur in critical sections. But the way flags
+ * Note: The bits must be set atomically to prevent races
+ * from other writers. Reads of flags do not need to be in
+ * sync as they occur in critical sections. But the way flags
* is currently used, these changes do not affect the code
* except that when a change is made, it may have a slight
* delay in propagating the changes to other CPUs due to
- * caching and such.
+ * caching and such. Which is mostly OK ;-)
*/
- unsigned int flags;
+ unsigned long flags;
};
#define __TRACE_EVENT_FLAGS(name, value) \
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index bbf09c2..4bda044 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -413,6 +413,10 @@ static inline notrace int ftrace_get_offsets_##call( \
* int __data_size;
* int pc;
*
+ * if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
+ * &ftrace_file->flags))
+ * return;
+ *
* local_save_flags(irq_flags);
* pc = preempt_count();
*
@@ -518,6 +522,10 @@ ftrace_raw_event_##call(void *__data, proto) \
int __data_size; \
int pc; \
\
+ if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, \
+ &ftrace_file->flags)) \
+ return; \
+ \
local_save_flags(irq_flags); \
pc = preempt_count(); \
\
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 38b54c5..106640b 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -205,37 +205,77 @@ void trace_event_enable_cmd_record(bool enable)
if (enable) {
tracing_start_cmdline_record();
- file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
+ set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
} else {
tracing_stop_cmdline_record();
- file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
+ clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
}
} while_for_each_event_file();
mutex_unlock(&event_mutex);
}
-static int ftrace_event_enable_disable(struct ftrace_event_file *file,
- int enable)
+static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
+ int enable, int soft_disable)
{
struct ftrace_event_call *call = file->event_call;
int ret = 0;
+ int disable;
switch (enable) {
case 0:
- if (file->flags & FTRACE_EVENT_FL_ENABLED) {
- file->flags &= ~FTRACE_EVENT_FL_ENABLED;
+ /*
+ * When soft_disable is set and enable is cleared, we want
+ * to clear the SOFT_DISABLED flag but leave the event in the
+ * state that it was. That is, if the event was enabled and
+ * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
+ * is set we do not want the event to be enabled before we
+ * clear the bit.
+ *
+ * When soft_disable is not set but the SOFT_MODE flag is,
+ * we do nothing. Do not disable the tracepoint, otherwise
+ * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
+ */
+ if (soft_disable) {
+ disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
+ clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+ } else
+ disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
+
+ if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
+ clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
tracing_stop_cmdline_record();
- file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
+ clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
}
call->class->reg(call, TRACE_REG_UNREGISTER, file);
}
+ /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT */
+ if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+ set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
break;
case 1:
+ /*
+ * When soft_disable is set and enable is set, we want to
+ * register the tracepoint for the event, but leave the event
+ * as is. That means, if the event was already enabled, we do
+ * nothing (but set SOFT_MODE). If the event is disabled, we
+ * set SOFT_DISABLED before enabling the event tracepoint, so
+ * it still seems to be disabled.
+ */
+ if (!soft_disable)
+ clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
+ else
+ set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+
if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
+
+ /* Keep the event disabled, when going to SOFT_MODE. */
+ if (soft_disable)
+ set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
+
if (trace_flags & TRACE_ITER_RECORD_CMD) {
tracing_start_cmdline_record();
- file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
+ set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
}
ret = call->class->reg(call, TRACE_REG_REGISTER, file);
if (ret) {
@@ -244,7 +284,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_file *file,
"%s\n", call->name);
break;
}
- file->flags |= FTRACE_EVENT_FL_ENABLED;
+ set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
/* WAS_ENABLED gets set but never cleared. */
call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
@@ -255,6 +295,12 @@ static int ftrace_event_enable_disable(struct ftrace_event_file *file,
return ret;
}
+static int ftrace_event_enable_disable(struct ftrace_event_file *file,
+ int enable)
+{
+ return __ftrace_event_enable_disable(file, enable, 0);
+}
+
static void ftrace_clear_events(struct trace_array *tr)
{
struct ftrace_event_file *file;
@@ -547,12 +593,15 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
struct ftrace_event_file *file = filp->private_data;
char *buf;
- if (file->flags & FTRACE_EVENT_FL_ENABLED)
- buf = "1\n";
- else
+ if (file->flags & FTRACE_EVENT_FL_ENABLED) {
+ if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
+ buf = "0*\n";
+ else
+ buf = "1\n";
+ } else
buf = "0\n";
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}
static ssize_t
--
1.7.10.4