Message-ID: <CA+EHjTyDbH=7Bqo61CEadSKRsRHKsSWQcf=kbx5T_Fsj0-bL4g@mail.gmail.com>
Date: Fri, 15 Jul 2022 14:58:40 +0100
From: Fuad Tabba <tabba@...gle.com>
To: Kalesh Singh <kaleshsingh@...gle.com>
Cc: maz@...nel.org, mark.rutland@....com, broonie@...nel.org,
madvenka@...ux.microsoft.com, will@...nel.org, qperret@...gle.com,
james.morse@....com, alexandru.elisei@....com,
suzuki.poulose@....com, catalin.marinas@....com,
andreyknvl@...il.com, russell.king@...cle.com,
vincenzo.frascino@....com, mhiramat@...nel.org, ast@...nel.org,
drjones@...hat.com, wangkefeng.wang@...wei.com, elver@...gle.com,
keirf@...gle.com, yuzenghui@...wei.com, ardb@...nel.org,
oupton@...gle.com, linux-arm-kernel@...ts.infradead.org,
kvmarm@...ts.cs.columbia.edu, linux-kernel@...r.kernel.org,
android-mm@...gle.com, kernel-team@...roid.com
Subject: Re: [PATCH v4 03/18] arm64: stacktrace: Factor out unwind_next_common()

Hi Kalesh,

On Fri, Jul 15, 2022 at 7:11 AM Kalesh Singh <kaleshsingh@...gle.com> wrote:
>
> Move common unwind_next logic to stacktrace/common.h. This allows
> reusing the code in the implementation of the nVHE hypervisor stack
> unwinder, later in this series.
>
> Signed-off-by: Kalesh Singh <kaleshsingh@...gle.com>

Reviewed-by: Fuad Tabba <tabba@...gle.com>

Thanks,
/fuad

> ---
> arch/arm64/include/asm/stacktrace/common.h | 50 ++++++++++++++++++++++
> arch/arm64/kernel/stacktrace.c | 41 ++----------------
> 2 files changed, 54 insertions(+), 37 deletions(-)
>
> diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h
> index f58b786460d3..0c5cbfdb56b5 100644
> --- a/arch/arm64/include/asm/stacktrace/common.h
> +++ b/arch/arm64/include/asm/stacktrace/common.h
> @@ -65,6 +65,10 @@ struct unwind_state {
> static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
> struct stack_info *info);
>
> +static inline bool on_accessible_stack(const struct task_struct *tsk,
> + unsigned long sp, unsigned long size,
> + struct stack_info *info);
> +
> static inline bool on_stack(unsigned long sp, unsigned long size,
> unsigned long low, unsigned long high,
> enum stack_type type, struct stack_info *info)
> @@ -120,4 +124,50 @@ static inline void unwind_init_common(struct unwind_state *state,
> state->prev_type = STACK_TYPE_UNKNOWN;
> }
>
> +static inline int unwind_next_common(struct unwind_state *state,
> + struct stack_info *info)
> +{
> + struct task_struct *tsk = state->task;
> + unsigned long fp = state->fp;
> +
> + if (fp & 0x7)
> + return -EINVAL;
> +
> + if (!on_accessible_stack(tsk, fp, 16, info))
> + return -EINVAL;
> +
> + if (test_bit(info->type, state->stacks_done))
> + return -EINVAL;
> +
> + /*
> + * As stacks grow downward, any valid record on the same stack must be
> + * at a strictly higher address than the prior record.
> + *
> + * Stacks can nest in several valid orders, e.g.
> + *
> + * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
> + * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
> + *
> + * ... but the nesting itself is strict. Once we transition from one
> + * stack to another, it's never valid to unwind back to that first
> + * stack.
> + */
> + if (info->type == state->prev_type) {
> + if (fp <= state->prev_fp)
> + return -EINVAL;
> + } else {
> + __set_bit(state->prev_type, state->stacks_done);
> + }
> +
> + /*
> + * Record this frame record's values and location. The prev_fp and
> + * prev_type are only meaningful to the next unwind_next() invocation.
> + */
> + state->fp = READ_ONCE(*(unsigned long *)(fp));
> + state->pc = READ_ONCE(*(unsigned long *)(fp + 8));
> + state->prev_fp = fp;
> + state->prev_type = info->type;
> +
> + return 0;
> +}
> #endif /* __ASM_STACKTRACE_COMMON_H */
> diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
> index 94a5dd2ab8fd..834851939364 100644
> --- a/arch/arm64/kernel/stacktrace.c
> +++ b/arch/arm64/kernel/stacktrace.c
> @@ -81,48 +81,15 @@ static int notrace unwind_next(struct unwind_state *state)
> struct task_struct *tsk = state->task;
> unsigned long fp = state->fp;
> struct stack_info info;
> + int err;
>
> /* Final frame; nothing to unwind */
> if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
> return -ENOENT;
>
> - if (fp & 0x7)
> - return -EINVAL;
> -
> - if (!on_accessible_stack(tsk, fp, 16, &info))
> - return -EINVAL;
> -
> - if (test_bit(info.type, state->stacks_done))
> - return -EINVAL;
> -
> - /*
> - * As stacks grow downward, any valid record on the same stack must be
> - * at a strictly higher address than the prior record.
> - *
> - * Stacks can nest in several valid orders, e.g.
> - *
> - * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
> - * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
> - *
> - * ... but the nesting itself is strict. Once we transition from one
> - * stack to another, it's never valid to unwind back to that first
> - * stack.
> - */
> - if (info.type == state->prev_type) {
> - if (fp <= state->prev_fp)
> - return -EINVAL;
> - } else {
> - __set_bit(state->prev_type, state->stacks_done);
> - }
> -
> - /*
> - * Record this frame record's values and location. The prev_fp and
> - * prev_type are only meaningful to the next unwind_next() invocation.
> - */
> - state->fp = READ_ONCE(*(unsigned long *)(fp));
> - state->pc = READ_ONCE(*(unsigned long *)(fp + 8));
> - state->prev_fp = fp;
> - state->prev_type = info.type;
> + err = unwind_next_common(state, &info);
> + if (err)
> + return err;
>
> state->pc = ptrauth_strip_insn_pac(state->pc);
>
> --
> 2.37.0.170.g444d1eabd0-goog
>
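
For readers wondering how the factored-out helper is meant to be consumed,
here is a minimal sketch (an illustration, not code from this patch or the
later series): a source file supplies the on_accessible_stack() and
on_overflow_stack() callbacks declared in stacktrace/common.h, and the
generic frame-record checks then come from unwind_next_common(). The
example_stack_* bounds, example_unwind_next() and the use of
STACK_TYPE_TASK below are placeholder assumptions.

/*
 * Illustrative sketch only (not part of this patch): a hypothetical
 * consumer of <asm/stacktrace/common.h>. It defines the two callbacks
 * the header declares and then drives unwind_next_common() for a
 * single stack. example_stack_lo/example_stack_hi are assumed bounds;
 * STACK_TYPE_TASK is used purely for illustration.
 */
#include <asm/stacktrace/common.h>

static unsigned long example_stack_lo, example_stack_hi;

/* This sketch has no dedicated overflow stack. */
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				     struct stack_info *info)
{
	return false;
}

/* A frame record is accessible only if it lies entirely on the stack. */
static inline bool on_accessible_stack(const struct task_struct *tsk,
				       unsigned long sp, unsigned long size,
				       struct stack_info *info)
{
	return on_stack(sp, size, example_stack_lo, example_stack_hi,
			STACK_TYPE_TASK, info);
}

static int example_unwind_next(struct unwind_state *state)
{
	struct stack_info info;

	/*
	 * The alignment, accessibility, strictly-increasing-fp and
	 * stacks_done checks all live in the common helper now.
	 */
	return unwind_next_common(state, &info);
}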