Message-Id: <20190414160143.591255977@linutronix.de>
Date: Sun, 14 Apr 2019 17:59:37 +0200
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: x86@...nel.org, Andy Lutomirski <luto@...nel.org>,
Josh Poimboeuf <jpoimboe@...hat.com>,
Sean Christopherson <sean.j.christopherson@...el.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Pekka Enberg <penberg@...nel.org>, linux-mm@...ck.org
Subject: [patch V3 01/32] mm/slab: Fix broken stack trace storage
kstack_end() is broken on interrupt stacks as they are not guaranteed to be
THREAD_SIZE sized and THREAD_SIZE aligned.

Use the stack trace API (save_stack_trace()) instead. Remove the pointless
pointer increment at the end of the function while at it.
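
For reference, the generic kstack_end() helper hardcodes exactly that
THREAD_SIZE assumption. A minimal sketch of its logic (modeled on the
generic definition in include/linux/sched/task_stack.h; architectures
can override it):

	static inline int kstack_end(void *addr)
	{
		/*
		 * Treats the stack as ending at the next THREAD_SIZE
		 * boundary. That only holds for THREAD_SIZE sized and
		 * THREAD_SIZE aligned task stacks, not for interrupt
		 * stacks.
		 */
		return !(((unsigned long)addr + sizeof(void *)) &
			 (THREAD_SIZE - sizeof(void *)));
	}

On an unaligned interrupt stack this check can trigger at the wrong
address or not at all, so the loop removed below could walk past the
end of the stack.
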
Fixes: 98eb235b7feb ("[PATCH] page unmapping debug") - History tree
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Pekka Enberg <penberg@...nel.org>
Cc: linux-mm@...ck.org
---
mm/slab.c | 28 ++++++++++++----------------
1 file changed, 12 insertions(+), 16 deletions(-)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1470,33 +1470,29 @@ static bool is_debug_pagealloc_cache(str
 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
			    unsigned long caller)
 {
-	int size = cachep->object_size;
+	int size = cachep->object_size / sizeof(unsigned long);
 
 	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
 
-	if (size < 5 * sizeof(unsigned long))
+	if (size < 5)
 		return;
 
 	*addr++ = 0x12345678;
 	*addr++ = caller;
 	*addr++ = smp_processor_id();
-	size -= 3 * sizeof(unsigned long);
+#ifdef CONFIG_STACKTRACE
 	{
-		unsigned long *sptr = &caller;
-		unsigned long svalue;
-
-		while (!kstack_end(sptr)) {
-			svalue = *sptr++;
-			if (kernel_text_address(svalue)) {
-				*addr++ = svalue;
-				size -= sizeof(unsigned long);
-				if (size <= sizeof(unsigned long))
-					break;
-			}
-		}
+		struct stack_trace trace = {
+			.max_entries	= size - 4,
+			.entries	= addr,
+			.skip		= 3,
+		};
+		save_stack_trace(&trace);
+		addr += trace.nr_entries;
 
 	}
-	*addr++ = 0x87654321;
+#endif
+	*addr = 0x87654321;
 }
 
 static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
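
For clarity, the per-object debug storage produced by the new code is
laid out as follows (a sketch, assuming CONFIG_STACKTRACE=y; "nr" is
trace.nr_entries as filled in by save_stack_trace()):

	addr[0]            0x12345678          /* leading marker */
	addr[1]            caller
	addr[2]            smp_processor_id()
	addr[3 .. 3+nr-1]  up to (size - 4) stack trace entries
	addr[3+nr]         0x87654321          /* trailing marker */

Because max_entries is capped at size - 4 words, the trailing marker
always fits within object_size, and the final store drops the pointless
post-increment as addr is not used afterwards.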