Message-Id: <20211123193723.12112-5-madvenka@linux.microsoft.com>
Date: Tue, 23 Nov 2021 13:37:22 -0600
From: madvenka@...ux.microsoft.com
To: mark.rutland@....com, broonie@...nel.org, jpoimboe@...hat.com,
ardb@...nel.org, nobuta.keiya@...itsu.com,
sjitindarsingh@...il.com, catalin.marinas@....com, will@...nel.org,
jmorris@...ei.org, linux-arm-kernel@...ts.infradead.org,
live-patching@...r.kernel.org, linux-kernel@...r.kernel.org,
madvenka@...ux.microsoft.com
Subject: [PATCH v11 4/5] arm64: Introduce stack trace reliability checks in the unwinder
From: "Madhavan T. Venkataraman" <madvenka@...ux.microsoft.com>

There are some kernel features and conditions that make a stack trace
unreliable. Callers, such as livepatch, may require the unwinder to
detect these cases.

Introduce a new function called unwind_check_reliability() that detects
these cases and updates a reliability flag in the stack frame. Call
unwind_check_reliability() for every frame, that is, in unwind_start()
and unwind_next().

Introduce the first reliability check in unwind_check_reliability(): if
a return PC is not a valid kernel text address, consider the stack trace
unreliable, as it could be some generated code. Other reliability checks
will be added in the future.

Let unwind() return a boolean to indicate whether the stack trace is
reliable.

Introduce arch_stack_walk_reliable() for ARM64. This works like
arch_stack_walk() except that it returns -EINVAL if the stack trace is
not reliable.

Until all the reliability checks are in place, arch_stack_walk_reliable()
may not be used by livepatch. But it may be used by debug and test code.

Signed-off-by: Madhavan T. Venkataraman <madvenka@...ux.microsoft.com>
---
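Usage sketch (hypothetical, not part of this patch): debug/test code could
exercise the new interface along the lines below. The names show_entry()
and test_reliable_stacktrace() are made up for illustration; livepatch
itself would go through the generic stack_trace_save_tsk_reliable()
wrapper rather than calling the arch function directly.

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

/* Print one return address per frame; returning true continues the walk. */
static bool show_entry(void *cookie, unsigned long addr)
{
	pr_info("  %pS\n", (void *)addr);
	return true;
}

static void test_reliable_stacktrace(struct task_struct *task)
{
	int ret;

	/* For a non-current task, the caller must ensure it is not running. */
	ret = arch_stack_walk_reliable(show_entry, NULL, task);
	if (ret)
		pr_info("stack trace of %s is unreliable (%d)\n",
			task->comm, ret);
}
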
 arch/arm64/include/asm/stacktrace.h |  3 ++
 arch/arm64/kernel/stacktrace.c      | 59 +++++++++++++++++++++++++++--
 2 files changed, 58 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index d838586adef9..7143e80c3d96 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -53,6 +53,8 @@ struct stack_info {
* value.
*
* @failed: Unwind failed.
+ *
+ * @reliable: Stack trace is reliable.
*/
struct stackframe {
unsigned long fp;
@@ -64,6 +66,7 @@ struct stackframe {
struct llist_node *kr_cur;
#endif
bool failed;
+	bool reliable;
};
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 3b670ab1f0e9..77eb00e45558 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -18,6 +18,26 @@
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
+/*
+ * Check the stack frame for conditions that make further unwinding unreliable.
+ */
+static void unwind_check_reliability(struct task_struct *task,
+				     struct stackframe *frame)
+{
+	if (frame->fp == (unsigned long)task_pt_regs(task)->stackframe) {
+		/* Final frame; no more unwind, no need to check reliability */
+		return;
+	}
+
+	/*
+	 * If the PC is not a known kernel text address, then we cannot
+	 * be sure that a subsequent unwind will be reliable, as we
+	 * don't know that the code follows our unwind requirements.
+	 */
+	if (!__kernel_text_address(frame->pc))
+		frame->reliable = false;
+}
+
/*
* AArch64 PCS assigns the frame pointer to x29.
*
@@ -33,8 +53,9 @@
*/
-static void unwind_start(struct stackframe *frame, unsigned long fp,
-			 unsigned long pc)
+static void unwind_start(struct task_struct *task,
+			 struct stackframe *frame,
+			 unsigned long fp, unsigned long pc)
{
frame->fp = fp;
frame->pc = pc;
@@ -55,6 +76,8 @@ static void unwind_start(struct stackframe *frame, unsigned long fp,
frame->prev_fp = 0;
frame->prev_type = STACK_TYPE_UNKNOWN;
frame->failed = false;
+	frame->reliable = true;
+	unwind_check_reliability(task, frame);
}
/*
@@ -141,6 +164,7 @@ static void notrace unwind_next(struct task_struct *tsk,
if (is_kretprobe_trampoline(frame->pc))
frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
#endif
+	unwind_check_reliability(tsk, frame);
}
NOKPROBE_SYMBOL(unwind_next);
@@ -166,15 +190,16 @@ static bool unwind_continue(struct task_struct *task,
return true;
}
-static void notrace unwind(struct task_struct *tsk,
+static bool notrace unwind(struct task_struct *tsk,
unsigned long fp, unsigned long pc,
bool (*fn)(void *, unsigned long), void *data)
{
struct stackframe frame;
-	unwind_start(&frame, fp, pc);
+	unwind_start(tsk, &frame, fp, pc);
while (unwind_continue(tsk, &frame, fn, data))
unwind_next(tsk, &frame);
+	return !frame.failed && frame.reliable;
}
NOKPROBE_SYMBOL(unwind);
@@ -231,3 +256,29 @@ noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
}
unwind(task, fp, pc, consume_entry, cookie);
}
+
+/*
+ * arch_stack_walk_reliable() may not be used for livepatch until all of
+ * the reliability checks are in place in unwind_check_reliability().
+ * However, debug and test code can choose to use it even if all the
+ * checks are not in place.
+ */
+noinline int notrace arch_stack_walk_reliable(stack_trace_consume_fn consume_fn,
+					      void *cookie,
+					      struct task_struct *task)
+{
+	unsigned long fp, pc;
+
+	if (task == current) {
+		/* Skip arch_stack_walk_reliable() in the stack trace. */
+		fp = (unsigned long)__builtin_frame_address(1);
+		pc = (unsigned long)__builtin_return_address(0);
+	} else {
+		/* Caller guarantees that the task is not running. */
+		fp = thread_saved_fp(task);
+		pc = thread_saved_pc(task);
+	}
+	if (unwind(task, fp, pc, consume_fn, cookie))
+		return 0;
+	return -EINVAL;
+}
--
2.25.1