Date:	Fri, 8 May 2015 11:25:24 -0400
From:	Chris Metcalf <cmetcalf@...hip.com>
To:	<linux-kernel@...r.kernel.org>, Tejun Heo <tj@...nel.org>
CC:	Chris Metcalf <cmetcalf@...hip.com>
Subject: [PATCH] tile: improve stack backtrace

This commit fixes a number of issues with the tile backtrace code.

- Don't try to identify userspace shared object or executable paths
  if we are doing a backtrace from an interrupt; it's not legal,
  and also unlikely to be interesting.  Likewise, don't try to do
  it for other address spaces, since d_path() assumes it is being
  called in "current" context.

- Move "in_backtrace" from thread_struct to thread_info.
  This way we can access it even if our stack thread_info has been
  clobbered, which makes backtracing more robust.

- Avoid using "current" directly when testing for is_sigreturn().
  Since "current" may be corrupt, we're better off using kbt->task
  explicitly to look up the vdso_base for the current task.
  Conveniently, this simplifies the internal APIs (we only need
  one is_sigreturn() function now).

- Avoid bogus "Odd fault" warning when pc/sp/ex1 are all zero,
  as is true for kernel threads above the last frame.

- Hook into Tejun Heo's dump_stack() framework in lib/dump_stack.c.

- Write last entry in save_stack_trace() as ULONG_MAX, not zero,
  since ftrace (at least) relies on finding that marker.

- Implement save_stack_trace_regs() and save_stack_trace_user(),
  and set CONFIG_USER_STACKTRACE_SUPPORT.  (A brief usage sketch of
  this stacktrace API follows after the "---" marker below.)

Signed-off-by: Chris Metcalf <cmetcalf@...hip.com>
---
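Not part of the patch -- a minimal usage sketch of the generic stacktrace
API touched by the last two items above, assuming the <linux/stacktrace.h>
interface of this kernel generation; the caller name and buffer size are
hypothetical.  With CONFIG_USER_STACKTRACE_SUPPORT enabled,
save_stack_trace_user() captures the current task's user stack the same way.

#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/stacktrace.h>

static unsigned long example_entries[16];

static void example_dump(struct pt_regs *regs)
{
	struct stack_trace trace = {
		.entries	= example_entries,
		.max_entries	= ARRAY_SIZE(example_entries),
		.skip		= 0,
	};

	if (regs)
		save_stack_trace_regs(regs, &trace);	/* seed from saved regs */
	else
		save_stack_trace(&trace);		/* current kernel stack */

	/* Consumers such as ftrace stop at the trailing ULONG_MAX marker. */
	while (trace.nr_entries &&
	       example_entries[trace.nr_entries - 1] == ULONG_MAX)
		trace.nr_entries--;
}
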
 arch/tile/Kconfig                   |   1 +
 arch/tile/include/asm/processor.h   |   2 -
 arch/tile/include/asm/stack.h       |  13 ++--
 arch/tile/include/asm/thread_info.h |   1 +
 arch/tile/kernel/entry.S            |   7 --
 arch/tile/kernel/process.c          |  36 +++++++----
 arch/tile/kernel/stack.c            | 125 +++++++++++++++++++++---------------
 arch/tile/kernel/traps.c            |   3 +
 arch/tile/lib/exports.c             |   1 -
 9 files changed, 107 insertions(+), 82 deletions(-)

diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 8fac40f56ec7..bcc7d66976f1 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -24,6 +24,7 @@ config TILE
 	select MODULES_USE_ELF_RELA
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_SYSCALL_TRACEPOINTS
+	select USER_STACKTRACE_SUPPORT
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select HAVE_DEBUG_STACKOVERFLOW
 	select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index dd4f9f17e30a..139dfdee0134 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -111,8 +111,6 @@ struct thread_struct {
 	unsigned long long interrupt_mask;
 	/* User interrupt-control 0 state */
 	unsigned long intctrl_0;
-	/* Is this task currently doing a backtrace? */
-	bool in_backtrace;
 	/* Any other miscellaneous processor state bits */
 	unsigned long proc_status;
 #if !CHIP_HAS_FIXED_INTVEC_BASE()
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h
index 0e9d382a2d45..c3cb42615a9f 100644
--- a/arch/tile/include/asm/stack.h
+++ b/arch/tile/include/asm/stack.h
@@ -58,17 +58,14 @@ extern int KBacktraceIterator_end(struct KBacktraceIterator *kbt);
 /* Advance to the next frame. */
 extern void KBacktraceIterator_next(struct KBacktraceIterator *kbt);
 
+/* Dump just the contents of the pt_regs structure. */
+extern void tile_show_regs(struct pt_regs *);
+
 /*
  * Dump stack given complete register info. Use only from the
  * architecture-specific code; show_stack()
- * and dump_stack() (in entry.S) are architecture-independent entry points.
+ * and dump_stack() are architecture-independent entry points.
  */
-extern void tile_show_stack(struct KBacktraceIterator *, int headers);
-
-/* Dump stack of current process, with registers to seed the backtrace. */
-extern void dump_stack_regs(struct pt_regs *);
-
-/* Helper method for assembly dump_stack(). */
-extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
+extern void tile_show_stack(struct KBacktraceIterator *);
 
 #endif /* _ASM_TILE_STACK_H */
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index f804c39a5e4d..dc1fb28d9636 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -42,6 +42,7 @@ struct thread_info {
 	unsigned long		unalign_jit_tmp[4]; /* temp r0..r3 storage */
 	void __user		*unalign_jit_base; /* unalign fixup JIT base */
 #endif
+	bool in_backtrace;			/* currently doing backtrace? */
 };
 
 /*
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 3d9175992a20..670a3569450f 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -27,13 +27,6 @@ STD_ENTRY(current_text_addr)
 	{ move r0, lr; jrp lr }
 	STD_ENDPROC(current_text_addr)
 
-STD_ENTRY(dump_stack)
-	{ move r2, lr; lnk r1 }
-	{ move r4, r52; addli r1, r1, dump_stack - . }
-	{ move r3, sp; j _dump_stack }
-	jrp lr   /* keep backtracer happy */
-	STD_ENDPROC(dump_stack)
-
 STD_ENTRY(KBacktraceIterator_init_current)
 	{ move r2, lr; lnk r1 }
 	{ move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 96ea75e04582..a45213781ad0 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -546,33 +546,43 @@ void exit_thread(void)
 #endif
 }
 
-void show_regs(struct pt_regs *regs)
+void tile_show_regs(struct pt_regs *regs)
 {
-	struct task_struct *tsk = validate_current();
 	int i;
-
-	if (tsk != &corrupt_current)
-		show_regs_print_info(KERN_ERR);
 #ifdef __tilegx__
 	for (i = 0; i < 17; i++)
-		pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
+		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
 		       i, regs->regs[i], i+18, regs->regs[i+18],
 		       i+36, regs->regs[i+36]);
-	pr_err(" r17: " REGFMT " r35: " REGFMT " tp : " REGFMT "\n",
+	pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
 	       regs->regs[17], regs->regs[35], regs->tp);
-	pr_err(" sp : " REGFMT " lr : " REGFMT "\n", regs->sp, regs->lr);
+	pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
 #else
 	for (i = 0; i < 13; i++)
-		pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
+		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
+		       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
 		       i, regs->regs[i], i+14, regs->regs[i+14],
 		       i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
-	pr_err(" r13: " REGFMT " tp : " REGFMT " sp : " REGFMT " lr : " REGFMT "\n",
+	pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
 	       regs->regs[13], regs->tp, regs->sp, regs->lr);
 #endif
-	pr_err(" pc : " REGFMT " ex1: %ld     faultnum: %ld\n",
-	       regs->pc, regs->ex1, regs->faultnum);
+	pr_err(" pc : "REGFMT" ex1: %ld     faultnum: %ld flags:%s%s%s%s\n",
+	       regs->pc, regs->ex1, regs->faultnum,
+	       is_compat_task() ? " compat" : "",
+	       (regs->flags & PT_FLAGS_DISABLE_IRQ) ? " noirq" : "",
+	       !(regs->flags & PT_FLAGS_CALLER_SAVES) ? " nocallersave" : "",
+	       (regs->flags & PT_FLAGS_RESTORE_REGS) ? " restoreregs" : "");
+}
+
+void show_regs(struct pt_regs *regs)
+{
+	struct KBacktraceIterator kbt;
+
+	show_regs_print_info(KERN_DEFAULT);
+	tile_show_regs(regs);
 
-	dump_stack_regs(regs);
+	KBacktraceIterator_init(&kbt, NULL, regs);
+	tile_show_stack(&kbt);
 }
 
 /* To ensure stack dump on tiles occurs one by one. */
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index c42dce50acd8..35d34635e4f1 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -23,6 +23,7 @@
 #include <linux/mmzone.h>
 #include <linux/dcache.h>
 #include <linux/fs.h>
+#include <linux/hardirq.h>
 #include <linux/string.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
@@ -109,7 +110,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 		if (kbt->verbose)
 			pr_err("  <%s while in user mode>\n", fault);
 	} else {
-		if (kbt->verbose)
+		if (kbt->verbose && (p->pc != 0 || p->sp != 0 || p->ex1 != 0))
 			pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
 			       p->pc, p->sp, p->ex1);
 		return NULL;
@@ -119,10 +120,12 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 	return p;
 }
 
-/* Is the pc pointing to a sigreturn trampoline? */
-static int is_sigreturn(unsigned long pc)
+/* Is the iterator pointing to a sigreturn trampoline? */
+static int is_sigreturn(struct KBacktraceIterator *kbt)
 {
-	return current->mm && (pc == VDSO_SYM(&__vdso_rt_sigreturn));
+	return kbt->task->mm &&
+		(kbt->it.pc == ((ulong)kbt->task->mm->context.vdso_base +
+				(ulong)&__vdso_rt_sigreturn));
 }
 
 /* Return a pt_regs pointer for a valid signal handler frame */
@@ -131,7 +134,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
 {
 	BacktraceIterator *b = &kbt->it;
 
-	if (is_sigreturn(b->pc) && b->sp < PAGE_OFFSET &&
+	if (is_sigreturn(kbt) && b->sp < PAGE_OFFSET &&
 	    b->sp % sizeof(long) == 0) {
 		int retval;
 		pagefault_disable();
@@ -151,11 +154,6 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
 	return NULL;
 }
 
-static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
-{
-	return is_sigreturn(kbt->it.pc);
-}
-
 static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
 {
 	struct pt_regs *p;
@@ -178,7 +176,7 @@ static int KBacktraceIterator_next_item_inclusive(
 {
 	for (;;) {
 		do {
-			if (!KBacktraceIterator_is_sigreturn(kbt))
+			if (!is_sigreturn(kbt))
 				return KBT_ONGOING;
 		} while (backtrace_next(&kbt->it));
 
@@ -357,51 +355,50 @@ static void describe_addr(struct KBacktraceIterator *kbt,
  */
 static bool start_backtrace(void)
 {
-	if (current->thread.in_backtrace) {
+	if (current_thread_info()->in_backtrace) {
 		pr_err("Backtrace requested while in backtrace!\n");
 		return false;
 	}
-	current->thread.in_backtrace = true;
+	current_thread_info()->in_backtrace = true;
 	return true;
 }
 
 static void end_backtrace(void)
 {
-	current->thread.in_backtrace = false;
+	current_thread_info()->in_backtrace = false;
 }
 
 /*
  * This method wraps the backtracer's more generic support.
  * It is only invoked from the architecture-specific code; show_stack()
- * and dump_stack() (in entry.S) are architecture-independent entry points.
+ * and dump_stack() are architecture-independent entry points.
  */
-void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
+void tile_show_stack(struct KBacktraceIterator *kbt)
 {
 	int i;
 	int have_mmap_sem = 0;
 
 	if (!start_backtrace())
 		return;
-	if (headers) {
-		/*
-		 * Add a blank line since if we are called from panic(),
-		 * then bust_spinlocks() spit out a space in front of us
-		 * and it will mess up our KERN_ERR.
-		 */
-		pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n",
-		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
-		       raw_smp_processor_id(), get_cycles());
-	}
 	kbt->verbose = 1;
 	i = 0;
 	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
 		char namebuf[KSYM_NAME_LEN+100];
 		unsigned long address = kbt->it.pc;
 
-		/* Try to acquire the mmap_sem as we pass into userspace. */
-		if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
+		/*
+		 * Try to acquire the mmap_sem as we pass into userspace.
+		 * If we're in an interrupt context, don't even try, since
+		 * it's not safe to call e.g. d_path() from an interrupt,
+		 * since it uses spin locks without disabling interrupts.
+		 * Note we test "kbt->task == current", not "kbt->is_current",
+		 * since we're checking that "current" will work in d_path().
+		 */
+		if (kbt->task == current && address < PAGE_OFFSET &&
+		    !have_mmap_sem && kbt->task->mm && !in_interrupt()) {
 			have_mmap_sem =
 				down_read_trylock(&kbt->task->mm->mmap_sem);
+		}
 
 		describe_addr(kbt, address, have_mmap_sem,
 			      namebuf, sizeof(namebuf));
@@ -416,24 +413,12 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 	}
 	if (kbt->end == KBT_LOOP)
 		pr_err("Stack dump stopped; next frame identical to this one\n");
-	if (headers)
-		pr_err("Stack dump complete\n");
 	if (have_mmap_sem)
 		up_read(&kbt->task->mm->mmap_sem);
 	end_backtrace();
 }
 EXPORT_SYMBOL(tile_show_stack);
 
-
-/* This is called from show_regs() and _dump_stack() */
-void dump_stack_regs(struct pt_regs *regs)
-{
-	struct KBacktraceIterator kbt;
-	KBacktraceIterator_init(&kbt, NULL, regs);
-	tile_show_stack(&kbt, 1);
-}
-EXPORT_SYMBOL(dump_stack_regs);
-
 static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
 				       ulong pc, ulong lr, ulong sp, ulong r52)
 {
@@ -445,11 +430,15 @@ static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
 	return regs;
 }
 
-/* This is called from dump_stack() and just converts to pt_regs */
+/* Deprecated function currently only used by kernel_double_fault(). */
 void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
 {
+	struct KBacktraceIterator kbt;
 	struct pt_regs regs;
-	dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
+
+	regs_to_pt_regs(&regs, pc, lr, sp, r52);
+	KBacktraceIterator_init(&kbt, NULL, &regs);
+	tile_show_stack(&kbt);
 }
 
 /* This is called from KBacktraceIterator_init_current() */
@@ -461,22 +450,30 @@ void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
 				regs_to_pt_regs(&regs, pc, lr, sp, r52));
 }
 
-/* This is called only from kernel/sched/core.c, with esp == NULL */
+/*
+ * Called from sched_show_task() with task != NULL, or dump_stack()
+ * with task == NULL.  The esp argument is always NULL.
+ */
 void show_stack(struct task_struct *task, unsigned long *esp)
 {
 	struct KBacktraceIterator kbt;
-	if (task == NULL || task == current)
+	if (task == NULL || task == current) {
 		KBacktraceIterator_init_current(&kbt);
-	else
+		KBacktraceIterator_next(&kbt);  /* don't show first frame */
+	} else {
 		KBacktraceIterator_init(&kbt, task, NULL);
-	tile_show_stack(&kbt, 0);
+	}
+	tile_show_stack(&kbt);
 }
 
 #ifdef CONFIG_STACKTRACE
 
 /* Support generic Linux stack API too */
 
-void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+static void save_stack_trace_common(struct task_struct *task,
+				    struct pt_regs *regs,
+				    bool user,
+				    struct stack_trace *trace)
 {
 	struct KBacktraceIterator kbt;
 	int skip = trace->skip;
@@ -484,31 +481,57 @@ void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
 
 	if (!start_backtrace())
 		goto done;
-	if (task == NULL || task == current)
+	if (regs != NULL) {
+		KBacktraceIterator_init(&kbt, NULL, regs);
+	} else if (task == NULL || task == current) {
 		KBacktraceIterator_init_current(&kbt);
-	else
+		skip++;  /* don't show KBacktraceIterator_init_current */
+	} else {
 		KBacktraceIterator_init(&kbt, task, NULL);
+	}
 	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
 		if (skip) {
 			--skip;
 			continue;
 		}
-		if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
+		if (i >= trace->max_entries ||
+		    (!user && kbt.it.pc < PAGE_OFFSET))
 			break;
 		trace->entries[i++] = kbt.it.pc;
 	}
 	end_backtrace();
 done:
+	if (i < trace->max_entries)
+		trace->entries[i++] = ULONG_MAX;
 	trace->nr_entries = i;
 }
+
+void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+{
+	save_stack_trace_common(task, NULL, false, trace);
+}
 EXPORT_SYMBOL(save_stack_trace_tsk);
 
 void save_stack_trace(struct stack_trace *trace)
 {
-	save_stack_trace_tsk(NULL, trace);
+	save_stack_trace_common(NULL, NULL, false, trace);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+	save_stack_trace_common(NULL, regs, false, trace);
+}
+
+void save_stack_trace_user(struct stack_trace *trace)
+{
+	/* Trace user stack if we are not a kernel thread. */
+	if (current->mm)
+		save_stack_trace_common(NULL, task_pt_regs(current),
+					true, trace);
+	else if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
 #endif
 
 /* In entry.S */
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 855f7316f1ee..0011a9ff0525 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -407,6 +407,9 @@ void do_nmi(struct pt_regs *regs, int fault_num, unsigned long reason)
 	}
 }
 
+/* Deprecated function currently only used here. */
+extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
+
 void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
 {
 	_dump_stack(dummy, pc, lr, sp, r52);
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index 16326f288177..9d171ca4302c 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -26,7 +26,6 @@ EXPORT_SYMBOL(finv_user_asm);
 #include <linux/kernel.h>
 #include <asm/processor.h>
 EXPORT_SYMBOL(current_text_addr);
-EXPORT_SYMBOL(dump_stack);
 
 /* arch/tile/kernel/head.S */
 EXPORT_SYMBOL(empty_zero_page);
-- 
2.1.2
