Message-ID: <20250129232936.1795412-1-song@kernel.org>
Date: Wed, 29 Jan 2025 15:29:35 -0800
From: Song Liu <song@...nel.org>
To: linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org,
live-patching@...r.kernel.org
Cc: catalin.marinas@....com,
will@...nel.org,
mark.rutland@....com,
jpoimboe@...nel.org,
jikos@...nel.org,
mbenes@...e.cz,
pmladek@...e.com,
joe.lawrence@...hat.com,
surajjs@...zon.com,
duwe@...e.de,
song@...nel.org,
kernel-team@...a.com
Subject: [RFC 1/2] arm64: Implement arch_stack_walk_reliable

Make do_kunwind() and kunwind_stack_walk() return the result of the
stack walk to the caller, and use them to implement
arch_stack_walk_reliable(). The walk is reported as reliable (return
value 0) only when the unwinder reached the final frame; an early stop
by the consumer or any unwind error is reported as a negative error
code. This is needed to enable livepatching on arm64.

Signed-off-by: Song Liu <song@...nel.org>
---
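
Note for reviewers: the return convention above is chosen to match what
the generic code expects of arch_stack_walk_reliable(). For reference,
the existing caller in kernel/stacktrace.c looks roughly like this
(quoted from memory and trimmed for illustration; not part of this
patch):

int stack_trace_save_tsk_reliable(struct task_struct *tsk,
				  unsigned long *store, unsigned int size)
{
	struct stacktrace_cookie c = {
		.store	= store,
		.size	= size,
	};
	int ret;

	/* A task without a stack (e.g. a zombie) has a "reliably" empty trace. */
	if (!try_get_task_stack(tsk))
		return 0;

	/* Only a return value of 0 marks the trace as reliable. */
	ret = arch_stack_walk_reliable(stack_trace_consume_entry, &c, tsk);
	put_task_stack(tsk);
	return ret ? ret : c.len;
}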
 arch/arm64/Kconfig             |  2 +-
 arch/arm64/kernel/stacktrace.c | 35 +++++++++++++++++++++++++++-------
 2 files changed, 29 insertions(+), 8 deletions(-)
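
The intended consumer is livepatch: the per-task transition check only
proceeds when the reliable walk succeeds, and retries the task
otherwise. Simplified from klp_check_stack() in
kernel/livepatch/transition.c (again quoted from memory; details such
as the exact error codes may differ):

static int klp_check_stack(struct task_struct *task, const char **oldname)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	/* A negative return means the stack is not reliable: try again later. */
	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	if (ret < 0)
		return -EINVAL;
	nr_entries = ret;

	/* Refuse the transition while a patched function is still on the stack. */
	klp_for_each_object(klp_transition_patch, obj) {
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				*oldname = func->old_name;
				return ret;
			}
		}
	}

	return 0;
}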
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 213f42d5ca27..f5af6faf9e2b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -271,6 +271,7 @@ config ARM64
 	select HAVE_SOFTIRQ_ON_OWN_STACK
 	select USER_STACKTRACE_SUPPORT
 	select VDSO_GETRANDOM
+	select HAVE_RELIABLE_STACKTRACE
 	help
 	  ARM 64-bit (AArch64) Linux support.
 
@@ -2495,4 +2496,3 @@ endmenu # "CPU Power Management"
 source "drivers/acpi/Kconfig"
 
 source "arch/arm64/kvm/Kconfig"
-
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 1d9d51d7627f..280dd6839a18 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -277,22 +277,28 @@ kunwind_next(struct kunwind_state *state)
 
 typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);
 
-static __always_inline void
+static __always_inline int
 do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
 	   void *cookie)
 {
+	int ret;
+
 	if (kunwind_recover_return_address(state))
-		return;
+		return -EINVAL;
 
 	while (1) {
-		int ret;
-
-		if (!consume_state(state, cookie))
+		ret = consume_state(state, cookie) ? 0 : -EINVAL;
+		if (ret)
 			break;
 		ret = kunwind_next(state);
 		if (ret < 0)
 			break;
 	}
+
+	/* -ENOENT from kunwind_next() means the walk terminated successfully */
+	if (ret == -ENOENT)
+		ret = 0;
+	return ret;
 }
 
 /*
@@ -324,7 +330,7 @@ do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
 			: stackinfo_get_unknown();		\
 	})
 
-static __always_inline void
+static __always_inline int
 kunwind_stack_walk(kunwind_consume_fn consume_state,
 		   void *cookie, struct task_struct *task,
 		   struct pt_regs *regs)
@@ -352,7 +358,7 @@ kunwind_stack_walk(kunwind_consume_fn consume_state,
 
 	if (regs) {
 		if (task != current)
-			return;
+			return -EINVAL;
 		kunwind_init_from_regs(&state, regs);
 	} else if (task == current) {
 		kunwind_init_from_caller(&state);
@@ -360,7 +366,7 @@ kunwind_stack_walk(kunwind_consume_fn consume_state,
 		kunwind_init_from_task(&state, task);
 	}
 
-	do_kunwind(&state, consume_state, cookie);
+	return do_kunwind(&state, consume_state, cookie);
 }
 
 struct kunwind_consume_entry_data {
@@ -387,6 +393,21 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
 	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
 }
 
+noinline noinstr int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+					      void *cookie, struct task_struct *task)
+{
+	int ret;
+
+	struct kunwind_consume_entry_data data = {
+		.consume_entry = consume_entry,
+		.cookie = cookie,
+	};
+
+	ret = kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, NULL);
+
+	return ret;
+}
+
 struct bpf_unwind_consume_entry_data {
 	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
 	void *cookie;
--
2.43.5