Message-Id: <1466398527-1122-13-git-send-email-byungchul.park@lge.com>
Date: Mon, 20 Jun 2016 13:55:27 +0900
From: Byungchul Park <byungchul.park@....com>
To: peterz@...radead.org, mingo@...nel.org
Cc: linux-kernel@...r.kernel.org, npiggin@...e.de, walken@...gle.com,
ak@...e.de, tglx@...elltoy.tec.linutronix.de
Subject: [RFC 12/12] x86/dumpstack: Optimize save_stack_trace

Currently, the x86 implementation of save_stack_trace() walks the whole
stack region word by word, regardless of what trace->max_entries is.
However, it is unnecessary to keep walking once the caller's requirement
has already been fulfilled, that is, once
trace->nr_entries >= trace->max_entries holds.
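
Conceptually, all that is needed is a bound check in the walker's loop.
A minimal sketch of the idea (the function name and the bare-bones loop
are illustrative only; the actual patch below hooks the check into the
existing walkers via a new ->end_walk callback):

static void walk_stack_bounded(unsigned long *stack, unsigned long *end,
			       struct stack_trace *trace)
{
	/* Stop at the end of the region or once the buffer is full. */
	while (stack < end && trace->nr_entries < trace->max_entries) {
		unsigned long addr = *stack++;

		/* Record only addresses that point into kernel text. */
		if (__kernel_text_address(addr))
			trace->entries[trace->nr_entries++] = addr;
	}
}
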
For example, the CONFIG_LOCKDEP_CROSSRELEASE implementation calls
save_stack_trace() with max_entries = 5 frequently. I measured its
overhead by printing the difference of sched_clock() taken around each
call, on my QEMU x86 machine. With trace->max_entries = 5, the latency
improved by more than 70%.
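
For reference, the measurement was done roughly like the sketch below
(illustrative only, not part of this patch; the trace setup mirrors the
max_entries = 5 case described above):

	unsigned long entries[5];
	struct stack_trace trace = {
		.max_entries	= 5,
		.entries	= entries,
	};
	u64 start, end;

	start = sched_clock();
	save_stack_trace(&trace);
	end = sched_clock();
	printk("save_stack_trace() takes %llu ns\n", end - start);
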
Before this patch:
[ 2.326940] save_stack_trace() takes 83931 ns
[ 2.326389] save_stack_trace() takes 62576 ns
[ 2.327575] save_stack_trace() takes 58826 ns
[ 2.327000] save_stack_trace() takes 88980 ns
[ 2.327424] save_stack_trace() takes 59831 ns
[ 2.327575] save_stack_trace() takes 58482 ns
[ 2.327597] save_stack_trace() takes 87114 ns
[ 2.327931] save_stack_trace() takes 121140 ns
[ 2.327434] save_stack_trace() takes 64321 ns
[ 2.328632] save_stack_trace() takes 84997 ns
[ 2.328000] save_stack_trace() takes 115037 ns
[ 2.328460] save_stack_trace() takes 72292 ns
[ 2.328632] save_stack_trace() takes 61236 ns
[ 2.328567] save_stack_trace() takes 76666 ns
[ 2.328867] save_stack_trace() takes 79525 ns
[ 2.328460] save_stack_trace() takes 64902 ns
[ 2.329585] save_stack_trace() takes 58760 ns
[ 2.329000] save_stack_trace() takes 91349 ns
[ 2.329414] save_stack_trace() takes 60069 ns
[ 2.329585] save_stack_trace() takes 61012 ns
[ 2.329573] save_stack_trace() takes 76820 ns
[ 2.329863] save_stack_trace() takes 62131 ns
[ 2.330000] save_stack_trace() takes 99476 ns
[ 2.329846] save_stack_trace() takes 62419 ns
[ 2.330000] save_stack_trace() takes 88918 ns
[ 2.330253] save_stack_trace() takes 73669 ns
[ 2.330520] save_stack_trace() takes 67876 ns
[ 2.330671] save_stack_trace() takes 75963 ns
[ 2.330983] save_stack_trace() takes 95079 ns
[ 2.330451] save_stack_trace() takes 62352 ns
After this patch:
[ 2.780735] save_stack_trace() takes 19902 ns
[ 2.780718] save_stack_trace() takes 20240 ns
[ 2.781692] save_stack_trace() takes 45215 ns
[ 2.781477] save_stack_trace() takes 20191 ns
[ 2.781694] save_stack_trace() takes 20044 ns
[ 2.782589] save_stack_trace() takes 20292 ns
[ 2.782706] save_stack_trace() takes 20024 ns
[ 2.782706] save_stack_trace() takes 19881 ns
[ 2.782881] save_stack_trace() takes 24577 ns
[ 2.782706] save_stack_trace() takes 19901 ns
[ 2.783621] save_stack_trace() takes 24381 ns
[ 2.783621] save_stack_trace() takes 20205 ns
[ 2.783760] save_stack_trace() takes 19956 ns
[ 2.783718] save_stack_trace() takes 20280 ns
[ 2.784179] save_stack_trace() takes 20099 ns
[ 2.784835] save_stack_trace() takes 20055 ns
[ 2.785922] save_stack_trace() takes 20157 ns
[ 2.785922] save_stack_trace() takes 20140 ns
[ 2.786178] save_stack_trace() takes 20040 ns
[ 2.786877] save_stack_trace() takes 20102 ns
[ 2.795000] save_stack_trace() takes 21147 ns
[ 2.795397] save_stack_trace() takes 20230 ns
[ 2.795397] save_stack_trace() takes 31274 ns
[ 2.795739] save_stack_trace() takes 19706 ns
[ 2.796484] save_stack_trace() takes 20266 ns
[ 2.796484] save_stack_trace() takes 20902 ns
[ 2.797000] save_stack_trace() takes 38110 ns
[ 2.797510] save_stack_trace() takes 20224 ns
[ 2.798181] save_stack_trace() takes 20172 ns
[ 2.798837] save_stack_trace() takes 20824 ns
Signed-off-by: Byungchul Park <byungchul.park@....com>
---
 arch/x86/include/asm/stacktrace.h | 1 +
 arch/x86/kernel/dumpstack.c       | 2 ++
 arch/x86/kernel/dumpstack_32.c    | 2 ++
 arch/x86/kernel/stacktrace.c      | 7 +++++++
 4 files changed, 12 insertions(+)

diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 70bbe39..fc572e7 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -41,6 +41,7 @@ struct stacktrace_ops {
 	/* On negative return stop dumping */
 	int (*stack)(void *data, char *name);
 	walk_stack_t	walk_stack;
+	int (*end_walk)(void *data);
 };
 
 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 9c30acf..355fe8f 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -115,6 +115,8 @@ print_context_stack(struct thread_info *tinfo,
 			print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
 		}
 		stack++;
+		if (ops->end_walk && ops->end_walk(data))
+			break;
 	}
 	return bp;
 }
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 464ffd6..cc51419 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -71,6 +71,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		context = task_thread_info(task);
 		bp = ops->walk_stack(context, stack, bp, ops, data,
 				     end_stack, &graph);
+		if (ops->end_walk && ops->end_walk(data))
+			break;
 
 		/* Stop if not on irq stack */
 		if (!end_stack)
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index fdd0c64..9545719 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -43,10 +43,17 @@ save_stack_address_nosched(void *data, unsigned long addr, int reliable)
 	return __save_stack_address(data, addr, reliable, true);
 }
 
+static int save_stack_end(void *data)
+{
+	struct stack_trace *trace = data;
+	return trace->nr_entries >= trace->max_entries;
+}
+
 static const struct stacktrace_ops save_stack_ops = {
 	.stack		= save_stack_stack,
 	.address	= save_stack_address,
 	.walk_stack	= print_context_stack,
+	.end_walk	= save_stack_end,
 };
 
 static const struct stacktrace_ops save_stack_ops_nosched = {
--
1.9.1