Message-ID: <tip-2a820bf74918d61ea54f7c1001f4a6a2e457577c@git.kernel.org>
Date:   Mon, 29 Apr 2019 11:45:06 -0700
From:   tip-bot for Thomas Gleixner <tipbot@...or.com>
To:     linux-tip-commits@...r.kernel.org
Cc:     airlied@...ux.ie, jpoimboe@...hat.com, jani.nikula@...ux.intel.com,
        mbenes@...e.cz, catalin.marinas@....com, mingo@...nel.org,
        tom.zanussi@...ux.intel.com, akpm@...ux-foundation.org,
        robin.murphy@....com, aryabinin@...tuozzo.com,
        rppt@...ux.vnet.ibm.com, hch@....de, rientjes@...gle.com,
        m.szyprowski@...sung.com, glider@...gle.com, josef@...icpanda.com,
        dsterba@...e.com, cl@...ux.com, snitzer@...hat.com, hpa@...or.com,
        akinobu.mita@...il.com, rostedt@...dmis.org,
        joonas.lahtinen@...ux.intel.com, dvyukov@...gle.com,
        rodrigo.vivi@...el.com, jthumshirn@...e.de,
        maarten.lankhorst@...ux.intel.com, linux-kernel@...r.kernel.org,
        clm@...com, penberg@...nel.org, daniel@...ll.ch, agk@...hat.com,
        tglx@...utronix.de, luto@...nel.org, adobriyan@...il.com
Subject: [tip:core/stacktrace] tracing: Use percpu stack trace buffer more
 intelligently

Commit-ID:  2a820bf74918d61ea54f7c1001f4a6a2e457577c
Gitweb:     https://git.kernel.org/tip/2a820bf74918d61ea54f7c1001f4a6a2e457577c
Author:     Thomas Gleixner <tglx@...utronix.de>
AuthorDate: Thu, 25 Apr 2019 11:45:14 +0200
Committer:  Thomas Gleixner <tglx@...utronix.de>
CommitDate: Mon, 29 Apr 2019 12:37:55 +0200

tracing: Use percpu stack trace buffer more intelligently

The per cpu stack trace buffer usage pattern is odd at best. The buffer has
room for 512 stack trace entries on 64-bit and 1024 on 32-bit. When
interrupts or exceptions nest after the per cpu buffer has been acquired, the
stack trace length is hardcoded to 8 entries. 512/1024 stack trace entries
are unrealistic for kernel stacks, so reserving the whole buffer for a single
trace is a complete waste.

Split the buffer into 4 nesting levels of 128/256 entries each. This allows
nesting contexts (interrupts, exceptions) to use the per cpu buffer for stack
retrieval and avoids the fixed length allocation along with the conditional
execution paths.
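
The following is a minimal user-space sketch of the nesting scheme described
above, added here for illustration only; it is not part of the patch. The
demo_* names are invented, the arithmetic assumes a 4096-byte page and an
8-byte unsigned long (the 128-entries-per-level case from the changelog), and
a plain integer stands in for the kernel's per cpu reserve counter.

#include <stdio.h>

#define DEMO_PAGE_SIZE		4096
#define DEMO_KSTACK_NESTING	4	/* normal, softirq, irq, NMI */
#define DEMO_KSTACK_ENTRIES	\
	(DEMO_PAGE_SIZE / (DEMO_KSTACK_NESTING * sizeof(unsigned long)))

struct demo_stack {
	unsigned long	calls[DEMO_KSTACK_ENTRIES];
};

/* The kernel keeps one of these per CPU; a single instance suffices here. */
static struct demo_stack demo_stacks[DEMO_KSTACK_NESTING];
static int demo_reserve;	/* stand-in for the per cpu counter */

/* Roughly mirrors __this_cpu_inc_return(ftrace_stack_reserve) - 1 plus a bounds check. */
static struct demo_stack *demo_acquire(int *stackidx)
{
	*stackidx = demo_reserve++;
	if (*stackidx >= DEMO_KSTACK_NESTING)
		return NULL;	/* nested deeper than the 4 expected levels */
	return &demo_stacks[*stackidx];
}

static void demo_release(void)
{
	demo_reserve--;		/* kernel: __this_cpu_dec(ftrace_stack_reserve) */
}

int main(void)
{
	int stackidx;
	struct demo_stack *fstack = demo_acquire(&stackidx);

	if (fstack)
		printf("nesting level %d gets %zu of %zu total entries\n",
		       stackidx,
		       sizeof(fstack->calls) / sizeof(unsigned long),
		       (size_t)DEMO_KSTACK_NESTING * DEMO_KSTACK_ENTRIES);
	demo_release();
	return 0;
}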

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@...hat.com>
Cc: Andy Lutomirski <luto@...nel.org>
Cc: Steven Rostedt <rostedt@...dmis.org>
Cc: Alexander Potapenko <glider@...gle.com>
Cc: Alexey Dobriyan <adobriyan@...il.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Christoph Lameter <cl@...ux.com>
Cc: Pekka Enberg <penberg@...nel.org>
Cc: linux-mm@...ck.org
Cc: David Rientjes <rientjes@...gle.com>
Cc: Catalin Marinas <catalin.marinas@....com>
Cc: Dmitry Vyukov <dvyukov@...gle.com>
Cc: Andrey Ryabinin <aryabinin@...tuozzo.com>
Cc: kasan-dev@...glegroups.com
Cc: Mike Rapoport <rppt@...ux.vnet.ibm.com>
Cc: Akinobu Mita <akinobu.mita@...il.com>
Cc: Christoph Hellwig <hch@....de>
Cc: iommu@...ts.linux-foundation.org
Cc: Robin Murphy <robin.murphy@....com>
Cc: Marek Szyprowski <m.szyprowski@...sung.com>
Cc: Johannes Thumshirn <jthumshirn@...e.de>
Cc: David Sterba <dsterba@...e.com>
Cc: Chris Mason <clm@...com>
Cc: Josef Bacik <josef@...icpanda.com>
Cc: linux-btrfs@...r.kernel.org
Cc: dm-devel@...hat.com
Cc: Mike Snitzer <snitzer@...hat.com>
Cc: Alasdair Kergon <agk@...hat.com>
Cc: Daniel Vetter <daniel@...ll.ch>
Cc: intel-gfx@...ts.freedesktop.org
Cc: Joonas Lahtinen <joonas.lahtinen@...ux.intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>
Cc: dri-devel@...ts.freedesktop.org
Cc: David Airlie <airlied@...ux.ie>
Cc: Jani Nikula <jani.nikula@...ux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@...el.com>
Cc: Tom Zanussi <tom.zanussi@...ux.intel.com>
Cc: Miroslav Benes <mbenes@...e.cz>
Cc: linux-arch@...r.kernel.org
Link: https://lkml.kernel.org/r/20190425094803.066064076@linutronix.de

---
 kernel/trace/trace.c | 73 ++++++++++++++++++++++++++--------------------------
 1 file changed, 37 insertions(+), 36 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 21153e64bf1c..4fc93004feab 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2749,12 +2749,21 @@ trace_function(struct trace_array *tr,
 
 #ifdef CONFIG_STACKTRACE
 
-#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
+/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
+#define FTRACE_KSTACK_NESTING	4
+
+#define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)
+
 struct ftrace_stack {
-	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
+	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
+};
+
+
+struct ftrace_stacks {
+	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
 };
 
-static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
+static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
 
 static void __ftrace_trace_stack(struct ring_buffer *buffer,
@@ -2763,10 +2772,11 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 {
 	struct trace_event_call *call = &event_kernel_stack;
 	struct ring_buffer_event *event;
+	struct ftrace_stack *fstack;
 	struct stack_entry *entry;
 	struct stack_trace trace;
-	int use_stack;
-	int size = FTRACE_STACK_ENTRIES;
+	int size = FTRACE_KSTACK_ENTRIES;
+	int stackidx;
 
 	trace.nr_entries	= 0;
 	trace.skip		= skip;
@@ -2788,29 +2798,32 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	 */
 	preempt_disable_notrace();
 
-	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
+	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
+
+	/* This should never happen. If it does, yell once and skip */
+	if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING))
+		goto out;
+
 	/*
-	 * We don't need any atomic variables, just a barrier.
-	 * If an interrupt comes in, we don't care, because it would
-	 * have exited and put the counter back to what we want.
-	 * We just need a barrier to keep gcc from moving things
-	 * around.
+	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
+	 * interrupt will either see the value pre increment or post
+	 * increment. If the interrupt happens pre increment it will have
+	 * restored the counter when it returns.  We just need a barrier to
+	 * keep gcc from moving things around.
 	 */
 	barrier();
-	if (use_stack == 1) {
-		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
-		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;
 
-		if (regs)
-			save_stack_trace_regs(regs, &trace);
-		else
-			save_stack_trace(&trace);
+	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
+	trace.entries		= fstack->calls;
+	trace.max_entries	= FTRACE_KSTACK_ENTRIES;
 
-		if (trace.nr_entries > size)
-			size = trace.nr_entries;
-	} else
-		/* From now on, use_stack is a boolean */
-		use_stack = 0;
+	if (regs)
+		save_stack_trace_regs(regs, &trace);
+	else
+		save_stack_trace(&trace);
+
+	if (trace.nr_entries > size)
+		size = trace.nr_entries;
 
 	size *= sizeof(unsigned long);
 
@@ -2820,19 +2833,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 		goto out;
 	entry = ring_buffer_event_data(event);
 
-	memset(&entry->caller, 0, size);
-
-	if (use_stack)
-		memcpy(&entry->caller, trace.entries,
-		       trace.nr_entries * sizeof(unsigned long));
-	else {
-		trace.max_entries	= FTRACE_STACK_ENTRIES;
-		trace.entries		= entry->caller;
-		if (regs)
-			save_stack_trace_regs(regs, &trace);
-		else
-			save_stack_trace(&trace);
-	}
+	memcpy(&entry->caller, trace.entries, size);
 
 	entry->size = trace.nr_entries;
 
