Message-Id: <20190319171206.97107-3-dianders@chromium.org>
Date: Tue, 19 Mar 2019 10:12:06 -0700
From: Douglas Anderson <dianders@...omium.org>
To: Steven Rostedt <rostedt@...dmis.org>,
Ingo Molnar <mingo@...hat.com>,
Jason Wessel <jason.wessel@...driver.com>,
Daniel Thompson <daniel.thompson@...aro.org>
Cc: kgdb-bugreport@...ts.sourceforge.net,
Brian Norris <briannorris@...omium.org>,
Douglas Anderson <dianders@...omium.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH v6 3/3] tracing: kdb: Allow ftdump to skip all but the last few entries

The 'ftdump' command in kdb is currently a bit of a last resort, at
least if you have lots of traces turned on. It's going to print a
whole boatload of data out of your serial port, which is probably
running at 115200 baud. This could easily take many, many minutes.

Usually you're most interested in what's at the _end_ of the ftrace
buffer, AKA what happened most recently. That means you've got to
wait the full time for the dump. The 'ftdump' command does attempt to
help you a little bit by allowing you to skip a fixed number of
entries, but unfortunately it provides no way for you to know how
many entries you should skip.
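
For instance, in a hypothetical session (the prompt and numbers here
are made up):

  [0]kdb> ftdump 100

skips the first 100 entries, but nothing tells you whether 100 is
anywhere near the right amount to skip.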

Let's do something similar to Python's negative indexing and allow
you to use a negative number to indicate that you want to skip all
entries except the last few. This lets you quickly see what you want.
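
For example, to see just the last 5 entries (again, a hypothetical
session):

  [0]kdb> ftdump -5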

Note that we also change the printout in ftdump to print the
(positive) number of entries actually skipped, since that's helpful
to know when you've specified a negative skip count.
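
So, with made-up numbers, if the buffer held 5000 entries then
'ftdump -5' would print:

  Dumping ftrace buffer:
  (skipping 4995 entries)
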
Signed-off-by: Douglas Anderson <dianders@...omium.org>
---
Changes in v6:
- Keep tracing disabled between counting and dumping.
- Remove Daniel Thompson Ack due to changes between v5 and v6.

Changes in v5:
- Only print skipping info if we skipped something (Daniel/Steven)
- Add Daniel Thompson Ack.

Changes in v4:
- Now uses trace_total_entries() / trace_total_entries_cpu().
- Based upon new patch that renames "lines" to "entries".

Changes in v3:
- Optimize counting as per Steven Rostedt.
- Down to 1 patch since patch #1 from v2 landed.

kernel/trace/trace_kdb.c | 45 +++++++++++++++++++++++++++-------------
1 file changed, 31 insertions(+), 14 deletions(-)

diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index 4b666643d69f..6c1ae6b752d1 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -17,29 +17,25 @@
 #include "trace.h"
 #include "trace_output.h"
 
+static struct trace_iterator iter;
+static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
+
 static void ftrace_dump_buf(int skip_entries, long cpu_file)
 {
-	/* use static because iter can be a bit big for the stack */
-	static struct trace_iterator iter;
-	static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
 	struct trace_array *tr;
 	unsigned int old_userobj;
 	int cnt = 0, cpu;
 
-	trace_init_global_iter(&iter);
-	iter.buffer_iter = buffer_iter;
 	tr = iter.tr;
 
-	for_each_tracing_cpu(cpu) {
-		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
-	}
-
 	old_userobj = tr->trace_flags;
 
 	/* don't look at user memory in panic mode */
 	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
 	kdb_printf("Dumping ftrace buffer:\n");
+	if (skip_entries)
+		kdb_printf("(skipping %d entries)\n", skip_entries);
 
 	/* reset all but tr, trace, and overruns */
 	memset(&iter.seq, 0,
@@ -89,10 +85,6 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file)
 out:
 	tr->trace_flags = old_userobj;
 
-	for_each_tracing_cpu(cpu) {
-		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
-	}
-
 	for_each_tracing_cpu(cpu) {
 		if (iter.buffer_iter[cpu]) {
 			ring_buffer_read_finish(iter.buffer_iter[cpu]);
@@ -109,6 +101,8 @@ static int kdb_ftdump(int argc, const char **argv)
 	int skip_entries = 0;
 	long cpu_file;
 	char *cp;
+	int cnt;
+	int cpu;
 
 	if (argc > 2)
 		return KDB_ARGCOUNT;
@@ -129,7 +123,29 @@ static int kdb_ftdump(int argc, const char **argv)
 	}
 
 	kdb_trap_printk++;
+
+	trace_init_global_iter(&iter);
+	iter.buffer_iter = buffer_iter;
+
+	for_each_tracing_cpu(cpu) {
+		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+	}
+
+	/* A negative skip_entries means skip all but the last entries */
+	if (skip_entries < 0) {
+		if (cpu_file == RING_BUFFER_ALL_CPUS)
+			cnt = trace_total_entries(NULL);
+		else
+			cnt = trace_total_entries_cpu(NULL, cpu_file);
+		skip_entries = max(cnt + skip_entries, 0);
+	}
+
 	ftrace_dump_buf(skip_entries, cpu_file);
+
+	for_each_tracing_cpu(cpu) {
+		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+	}
+
 	kdb_trap_printk--;
 
 	return 0;
@@ -138,7 +154,8 @@ static int kdb_ftdump(int argc, const char **argv)
 static __init int kdb_ftrace_register(void)
 {
 	kdb_register_flags("ftdump", kdb_ftdump, "[skip_#entries] [cpu]",
-			    "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
+			    "Dump ftrace log; -skip dumps last #entries", 0,
+			    KDB_ENABLE_ALWAYS_SAFE);
 	return 0;
 }
 
--
2.21.0.225.g810b269d1ac-goog