[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <f2cbccc0-1bab-48a3-b837-ce875df21efe@linux.microsoft.com>
Date: Thu, 30 Jan 2025 16:06:57 +0530
From: Prasanna Kumar T S M <ptsm@...ux.microsoft.com>
To: Weinan Liu <wnliu@...gle.com>, Josh Poimboeuf <jpoimboe@...nel.org>,
Steven Rostedt <rostedt@...dmis.org>, Indu Bhagat <indu.bhagat@...cle.com>,
Peter Zijlstra <peterz@...radead.org>
Cc: Mark Rutland <mark.rutland@....com>, roman.gushchin@...ux.dev,
Will Deacon <will@...nel.org>, Ian Rogers <irogers@...gle.com>,
linux-toolchains@...r.kernel.org, linux-kernel@...r.kernel.org,
live-patching@...r.kernel.org, joe.lawrence@...hat.com,
linux-arm-kernel@...ts.infradead.org
Subject: Re: [PATCH 6/8] unwind: arm64: add reliable stacktrace support for
arm64
On 28-01-2025 03:03, Weinan Liu wrote:
> To support livepatch, we need to add arch_stack_walk_reliable to
> support reliable stacktrace according to
> https://docs.kernel.org/livepatch/reliable-stacktrace.html#requirements
>
> Report the stacktrace as unreliable if we are unable to unwind the stack
> with the sframe unwinder and have to fall back to the FP-based unwinder.
>
> Signed-off-by: Weinan Liu <wnliu@...gle.com>
> ---
> arch/arm64/include/asm/stacktrace/common.h | 2 +
> arch/arm64/kernel/stacktrace.c | 47 +++++++++++++++++++++-
> 2 files changed, 47 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h
> index 19edae8a5b1a..26449cd402db 100644
> --- a/arch/arm64/include/asm/stacktrace/common.h
> +++ b/arch/arm64/include/asm/stacktrace/common.h
> @@ -26,6 +26,7 @@ struct stack_info {
> * @stacks: An array of stacks which can be unwound.
> * @nr_stacks: The number of stacks in @stacks.
> * @cfa: The sp value at the call site of the current function.
> + * @unreliable: Stacktrace is unreliable.
> */
> struct unwind_state {
> unsigned long fp;
> @@ -36,6 +37,7 @@ struct unwind_state {
> int nr_stacks;
> #ifdef CONFIG_SFRAME_UNWINDER
> unsigned long cfa;
> + bool unreliable;
> #endif
> };
>
> diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
> index c035adb8fe8a..eab16dc05bb5 100644
> --- a/arch/arm64/kernel/stacktrace.c
> +++ b/arch/arm64/kernel/stacktrace.c
> @@ -310,11 +310,16 @@ kunwind_next(struct kunwind_state *state)
> case KUNWIND_SOURCE_TASK:
> case KUNWIND_SOURCE_REGS_PC:
> #ifdef CONFIG_SFRAME_UNWINDER
> - err = unwind_next_frame_sframe(&state->common);
> + if (!state->common.unreliable)
> + err = unwind_next_frame_sframe(&state->common);
>
> /* Fallback to FP based unwinder */
> - if (err)
> + if (err || state->common.unreliable) {
> err = kunwind_next_frame_record(state);
> + /* Mark its stacktrace result as unreliable if it is unwindable via FP */
> + if (!err)
> + state->common.unreliable = true;
> + }
> #else
> err = kunwind_next_frame_record(state);
> #endif
> @@ -446,6 +451,44 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
> kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
> }
>
> +#ifdef CONFIG_SFRAME_UNWINDER
> +struct kunwind_reliable_consume_entry_data {
> + stack_trace_consume_fn consume_entry;
> + void *cookie;
> + bool unreliable;
> +};
> +
> +static __always_inline bool
> +arch_kunwind_reliable_consume_entry(const struct kunwind_state *state, void *cookie)
> +{
> + struct kunwind_reliable_consume_entry_data *data = cookie;
> +
> + if (state->common.unreliable) {
> + data->unreliable = true;
> + return false;
> + }
> + return data->consume_entry(data->cookie, state->common.pc);
> +}
> +
> +noinline notrace int arch_stack_walk_reliable(
> + stack_trace_consume_fn consume_entry,
> + void *cookie, struct task_struct *task)
> +{
> + struct kunwind_reliable_consume_entry_data data = {
> + .consume_entry = consume_entry,
> + .cookie = cookie,
> + .unreliable = false,
> + };
> +
> + kunwind_stack_walk(arch_kunwind_reliable_consume_entry, &data, task, NULL);
> +
> + if (data.unreliable)
> + return -EINVAL;
> +
> + return 0;
> +}
> +#endif
> +
> struct bpf_unwind_consume_entry_data {
> bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
> void *cookie;
Why not fold the previous patch and this one into a single patch?
Otherwise, the code looks good to me.
Reviewed-by: Prasanna Kumar T S M <ptsm@...ux.microsoft.com>
Powered by blists - more mailing lists