Message-ID: <20250127213310.2496133-7-wnliu@google.com>
Date: Mon, 27 Jan 2025 21:33:08 +0000
From: Weinan Liu <wnliu@...gle.com>
To: Josh Poimboeuf <jpoimboe@...nel.org>, Steven Rostedt <rostedt@...dmis.org>,
Indu Bhagat <indu.bhagat@...cle.com>, Peter Zijlstra <peterz@...radead.org>
Cc: Mark Rutland <mark.rutland@....com>, roman.gushchin@...ux.dev,
Will Deacon <will@...nel.org>, Ian Rogers <irogers@...gle.com>, linux-toolchains@...r.kernel.org,
linux-kernel@...r.kernel.org, live-patching@...r.kernel.org,
joe.lawrence@...hat.com, linux-arm-kernel@...ts.infradead.org,
Weinan Liu <wnliu@...gle.com>
Subject: [PATCH 6/8] unwind: arm64: add reliable stacktrace support for arm64

To support livepatch, add arch_stack_walk_reliable() to provide
reliable stacktraces, per the requirements documented at
https://docs.kernel.org/livepatch/reliable-stacktrace.html#requirements

Report the stacktrace as unreliable whenever the sframe unwinder fails
to unwind the stack and we fall back to the FP based unwinder.

Signed-off-by: Weinan Liu <wnliu@...gle.com>
---
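For reference, a minimal sketch of how a caller such as the livepatch
core might consume this interface; the helper names below are
hypothetical (the in-tree entry point is stack_trace_save_tsk_reliable()
in kernel/stacktrace.c), and the sketch only illustrates that a negative
return value means the trace must not be trusted:

#include <linux/sched.h>
#include <linux/stacktrace.h>

struct reliable_trace {
	unsigned long *store;	/* output buffer for return addresses */
	unsigned int size;	/* capacity of @store */
	unsigned int len;	/* entries filled so far */
};

static bool reliable_trace_consume(void *cookie, unsigned long addr)
{
	struct reliable_trace *t = cookie;

	if (t->len >= t->size)
		return false;	/* buffer full: stop the walk */
	t->store[t->len++] = addr;
	return true;
}

/*
 * Hypothetical wrapper: returns the trace length, or -EINVAL if the
 * unwinder had to fall back to frame pointers at any frame.
 */
static int save_reliable_trace(struct task_struct *task,
			       unsigned long *store, unsigned int size)
{
	struct reliable_trace t = { .store = store, .size = size };
	int ret;

	ret = arch_stack_walk_reliable(reliable_trace_consume, &t, task);
	return ret ? ret : (int)t.len;
}
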
arch/arm64/include/asm/stacktrace/common.h | 2 +
arch/arm64/kernel/stacktrace.c | 47 +++++++++++++++++++++-
2 files changed, 47 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h
index 19edae8a5b1a..26449cd402db 100644
--- a/arch/arm64/include/asm/stacktrace/common.h
+++ b/arch/arm64/include/asm/stacktrace/common.h
@@ -26,6 +26,7 @@ struct stack_info {
* @stacks: An array of stacks which can be unwound.
* @nr_stacks: The number of stacks in @stacks.
* @cfa: The sp value at the call site of the current function.
+ * @unreliable: Stacktrace is unreliable (the FP fallback was used).
*/
struct unwind_state {
unsigned long fp;
@@ -36,6 +37,7 @@ struct unwind_state {
int nr_stacks;
#ifdef CONFIG_SFRAME_UNWINDER
unsigned long cfa;
+ bool unreliable;
#endif
};
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c035adb8fe8a..eab16dc05bb5 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -310,11 +310,16 @@ kunwind_next(struct kunwind_state *state)
case KUNWIND_SOURCE_TASK:
case KUNWIND_SOURCE_REGS_PC:
#ifdef CONFIG_SFRAME_UNWINDER
- err = unwind_next_frame_sframe(&state->common);
+ if (!state->common.unreliable)
+ err = unwind_next_frame_sframe(&state->common);
/* Fallback to FP based unwinder */
- if (err)
+ if (state->common.unreliable || err) {
err = kunwind_next_frame_record(state);
+ /* The FP unwinder succeeded: mark the resulting stacktrace unreliable */
+ if (!err)
+ state->common.unreliable = true;
+ }
#else
err = kunwind_next_frame_record(state);
#endif
@@ -446,6 +451,44 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
+#ifdef CONFIG_SFRAME_UNWINDER
+struct kunwind_reliable_consume_entry_data {
+ stack_trace_consume_fn consume_entry;
+ void *cookie;
+ bool unreliable;
+};
+
+static __always_inline bool
+arch_kunwind_reliable_consume_entry(const struct kunwind_state *state, void *cookie)
+{
+ struct kunwind_reliable_consume_entry_data *data = cookie;
+
+ if (state->common.unreliable) {
+ data->unreliable = true;
+ return false;
+ }
+ return data->consume_entry(data->cookie, state->common.pc);
+}
+
+noinline notrace int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task)
+{
+ struct kunwind_reliable_consume_entry_data data = {
+ .consume_entry = consume_entry,
+ .cookie = cookie,
+ .unreliable = false,
+ };
+
+ kunwind_stack_walk(arch_kunwind_reliable_consume_entry, &data, task, NULL);
+
+ if (data.unreliable)
+ return -EINVAL;
+
+ return 0;
+}
+#endif
+
struct bpf_unwind_consume_entry_data {
bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
void *cookie;
--
2.48.1.262.g85cc9f2d1e-goog