Date:   Mon,  3 May 2021 12:36:13 -0500
From:   madvenka@...ux.microsoft.com
To:     broonie@...nel.org, jpoimboe@...hat.com, mark.rutland@....com,
        jthierry@...hat.com, catalin.marinas@....com, will@...nel.org,
        jmorris@...ei.org, pasha.tatashin@...een.com,
        linux-arm-kernel@...ts.infradead.org,
        live-patching@...r.kernel.org, linux-kernel@...r.kernel.org,
        madvenka@...ux.microsoft.com
Subject: [RFC PATCH v3 2/4] arm64: Check the return PC against unreliable code sections

From: "Madhavan T. Venkataraman" <madvenka@...ux.microsoft.com>

Create a sym_code_ranges[] array to cover the following text sections, which
contain functions defined with SYM_CODE_*(). These are low-level functions
that do not set up a proper frame pointer prologue and epilogue, so they are
inherently unreliable from a stack unwinding perspective.

	.entry.text
	.idmap.text
	.hyp.idmap.text
	.hyp.text
	.hibernate_exit.text
	.entry.tramp.text

If a return PC falls in any of these, mark the stack trace unreliable.
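
The start/end markers for these sections are linker-script symbols; on arm64
most of them are declared in arch/arm64/include/asm/sections.h (the generic
include/asm-generic/sections.h provides __entry_text_start/__entry_text_end),
which is why the patch adds an #include <asm/sections.h>. For reference, the
arm64 declarations look roughly like this (abridged sketch, not part of this
patch):

	extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
	extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
	extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
	extern char __hyp_text_start[], __hyp_text_end[];
	extern char __idmap_text_start[], __idmap_text_end[];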

The only exception is when the unwinder has already reached the last frame;
in that case, the stack trace is not marked unreliable since there is no
more unwinding to do. E.g.,

	- ret_from_fork() occurs at the end of the stack trace of
	  kernel tasks.

	- el0_*() functions occur at the end of EL0 exception stack
	  traces. This covers all user task entries into the kernel.

NOTE:
	- EL1 exception handlers are in .entry.text. So, stack traces that
	  contain those functions will be marked unreliable. This covers
	  interrupts, exceptions and breakpoints encountered while executing
	  in the kernel.

	- At the end of an interrupt, the kernel can preempt the current
	  task if required. So, the stack traces of all preempted tasks will
	  show the interrupt frame and will be considered unreliable.
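
For illustration only, here is a minimal sketch (not part of this patch) of
how a caller might consume the reliability flag when walking a blocked
task's stack. It assumes the start_backtrace()/unwind_frame() interfaces
used by arch/arm64/kernel/stacktrace.c and the 'reliable' field added
earlier in this series; the function name is hypothetical:

	#include <linux/sched.h>
	#include <asm/stacktrace.h>

	/* Hypothetical helper: returns true if every unwind step was reliable. */
	static bool task_stack_is_reliable(struct task_struct *tsk)
	{
		struct stackframe frame;

		/* Start from the task's saved frame pointer and PC. */
		start_backtrace(&frame, thread_saved_fp(tsk),
				thread_saved_pc(tsk));

		/* Walk every frame; any unreliable step taints the trace. */
		while (unwind_frame(tsk, &frame) == 0) {
			if (!frame.reliable)
				return false;
		}

		return true;
	}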

Signed-off-by: Madhavan T. Venkataraman <madvenka@...ux.microsoft.com>
---
 arch/arm64/kernel/stacktrace.c | 54 ++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c21a1bca28f3..1ff14615a55a 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -15,9 +15,48 @@
 
 #include <asm/irq.h>
 #include <asm/pointer_auth.h>
+#include <asm/sections.h>
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 
+struct code_range {
+	unsigned long	start;
+	unsigned long	end;
+};
+
+struct code_range	sym_code_ranges[] =
+{
+	/* non-unwindable ranges */
+	{ (unsigned long)__entry_text_start,
+	  (unsigned long)__entry_text_end },
+	{ (unsigned long)__idmap_text_start,
+	  (unsigned long)__idmap_text_end },
+	{ (unsigned long)__hyp_idmap_text_start,
+	  (unsigned long)__hyp_idmap_text_end },
+	{ (unsigned long)__hyp_text_start,
+	  (unsigned long)__hyp_text_end },
+#ifdef CONFIG_HIBERNATION
+	{ (unsigned long)__hibernate_exit_text_start,
+	  (unsigned long)__hibernate_exit_text_end },
+#endif
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	{ (unsigned long)__entry_tramp_text_start,
+	  (unsigned long)__entry_tramp_text_end },
+#endif
+	{ /* sentinel */ }
+};
+
+static struct code_range *lookup_range(unsigned long pc)
+{
+	struct code_range *range;
+
+	for (range = sym_code_ranges; range->start; range++) {
+		if (pc >= range->start && pc < range->end)
+			return range;
+	}
+	return range;
+}
+
 /*
  * AArch64 PCS assigns the frame pointer to x29.
  *
@@ -43,6 +82,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 {
 	unsigned long fp = frame->fp;
 	struct stack_info info;
+	struct code_range *range;
 
 	frame->reliable = true;
 
@@ -103,6 +143,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 		return 0;
 	}
 
+	range = lookup_range(frame->pc);
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk->ret_stack &&
 		frame->pc == (unsigned long)return_to_handler) {
@@ -118,9 +160,21 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 			return -EINVAL;
 		frame->pc = ret_stack->ret;
 		frame->pc = ptrauth_strip_insn_pac(frame->pc);
+		return 0;
 	}
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
+	if (!range->start)
+		return 0;
+
+	/*
+	 * The return PC falls in an unreliable function. If the final frame
+	 * has been reached, no more unwinding is needed. Otherwise, mark the
+	 * stack trace not reliable.
+	 */
+	if (frame->fp)
+		frame->reliable = false;
+
 	return 0;
 }
 NOKPROBE_SYMBOL(unwind_frame);
-- 
2.25.1
