Message-ID: <20211022152104.285488044@infradead.org>
Date: Fri, 22 Oct 2021 17:09:36 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: keescook@...omium.org, x86@...nel.org
Cc: linux-kernel@...r.kernel.org, peterz@...radead.org,
juri.lelli@...hat.com, vincent.guittot@...aro.org,
dietmar.eggemann@....com, rostedt@...dmis.org, bsegall@...gle.com,
mgorman@...e.de, bristot@...hat.com, akpm@...ux-foundation.org,
mark.rutland@....com, zhengqi.arch@...edance.com,
linux@...linux.org.uk, catalin.marinas@....com, will@...nel.org,
mpe@...erman.id.au, paul.walmsley@...ive.com, palmer@...belt.com,
hca@...ux.ibm.com, gor@...ux.ibm.com, borntraeger@...ibm.com,
linux-arch@...r.kernel.org, ardb@...nel.org
Subject: [PATCH 3/7] ARM: implement ARCH_STACKWALK
From: Ard Biesheuvel <ardb@...nel.org>
Implement the flavor of CONFIG_STACKTRACE that relies mostly on generic
code, and only needs a small arch_stack_walk() helper that performs the
actual frame unwinding.
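
[ For reference, a minimal sketch of how a generic caller can drive such a
  helper; the walk_cookie struct, store_entry() callback and
  capture_current() wrapper are illustrative names made up for this
  example, not the actual kernel/stacktrace.c implementation: ]

#include <linux/sched.h>
#include <linux/stacktrace.h>

/* Illustrative only: a cookie that collects return addresses. */
struct walk_cookie {
	unsigned long	*store;		/* output array */
	unsigned int	size;		/* capacity of the array */
	unsigned int	len;		/* entries stored so far */
};

/* stack_trace_consume_fn: returning false stops the walk early. */
static bool store_entry(void *cookie, unsigned long addr)
{
	struct walk_cookie *c = cookie;

	if (c->len >= c->size)
		return false;
	c->store[c->len++] = addr;
	return true;
}

/* Trace the current task: task == current, regs == NULL. */
static unsigned int capture_current(unsigned long *buf, unsigned int size)
{
	struct walk_cookie c = { .store = buf, .size = size };

	arch_stack_walk(store_entry, &c, current, NULL);
	return c.len;
}
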
Note that this removes the SMP check that used to live in
__save_stack_trace(), but this is no longer needed now that the generic
version of save_stack_trace_tsk() takes care not to walk the call stack
of tasks that are live on other CPUs.
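
[ Illustrative sketch of the caller-side pattern this relies on, reusing
  the walk_cookie/store_entry example above; the task_is_running() guard
  and capture_task() wrapper are hypothetical and only meant to show the
  shape of the check that now lives on the generic side, not a copy of
  kernel/stacktrace.c: ]

#include <linux/sched/task_stack.h>	/* try_get_task_stack() */

static unsigned int capture_task(struct task_struct *tsk,
				 unsigned long *buf, unsigned int size)
{
	struct walk_cookie c = { .store = buf, .size = size };

	/* Don't unwind a task that may be running on another CPU. */
	if (tsk != current && task_is_running(tsk))
		return 0;

	/* Pin the stack so it cannot be freed under us. */
	if (!try_get_task_stack(tsk))
		return 0;

	arch_stack_walk(store_entry, &c, tsk, NULL);
	put_task_stack(tsk);
	return c.len;
}
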
Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
arch/arm/Kconfig | 1
arch/arm/kernel/stacktrace.c | 114 +++++++------------------------------------
2 files changed, 20 insertions(+), 95 deletions(-)
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -24,6 +24,7 @@ config ARM
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAVE_NMI_SAFE_CMPXCHG if CPU_V7 || CPU_V7M || CPU_V6K
+ select ARCH_STACKWALK
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -2,6 +2,7 @@
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/sections.h>
@@ -87,106 +88,29 @@ void notrace walk_stackframe(struct stac
EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
-struct stack_trace_data {
- struct stack_trace *trace;
- unsigned int no_sched_functions;
- unsigned int skip;
-};
-
-static int save_trace(struct stackframe *frame, void *d)
-{
- struct stack_trace_data *data = d;
- struct stack_trace *trace = data->trace;
- struct pt_regs *regs;
- unsigned long addr = frame->pc;
-
- if (data->no_sched_functions && in_sched_functions(addr))
- return 0;
- if (data->skip) {
- data->skip--;
- return 0;
- }
-
- trace->entries[trace->nr_entries++] = addr;
-
- if (trace->nr_entries >= trace->max_entries)
- return 1;
-
- if (!in_entry_text(frame->pc))
- return 0;
-
- regs = (struct pt_regs *)frame->sp;
- if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
- return 0;
-
- trace->entries[trace->nr_entries++] = regs->ARM_pc;
-
- return trace->nr_entries >= trace->max_entries;
-}
-
-/* This must be noinline to so that our skip calculation works correctly */
-static noinline void __save_stack_trace(struct task_struct *tsk,
- struct stack_trace *trace, unsigned int nosched)
+noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task,
+ struct pt_regs *regs)
{
- struct stack_trace_data data;
struct stackframe frame;
- data.trace = trace;
- data.skip = trace->skip;
- data.no_sched_functions = nosched;
-
- if (tsk != current) {
-#ifdef CONFIG_SMP
- /*
- * What guarantees do we have here that 'tsk' is not
- * running on another CPU? For now, ignore it as we
- * can't guarantee we won't explode.
- */
- return;
-#else
- frame.fp = thread_saved_fp(tsk);
- frame.sp = thread_saved_sp(tsk);
- frame.lr = 0; /* recovered from the stack */
- frame.pc = thread_saved_pc(tsk);
-#endif
+ if (regs) {
+ frame.fp = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? regs->ARM_r7
+ : regs->ARM_fp;
+ frame.sp = regs->ARM_sp;
+ frame.lr = regs->ARM_lr;
+ frame.pc = regs->ARM_pc;
} else {
- /* We don't want this function nor the caller */
- data.skip += 2;
- frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_stack_pointer;
- frame.lr = (unsigned long)__builtin_return_address(0);
- frame.pc = (unsigned long)__save_stack_trace;
+ frame.fp = thread_saved_fp(task);
+ frame.sp = thread_saved_sp(task);
+ frame.lr = 0; /* recovered from the stack */
+ frame.pc = thread_saved_pc(task);
}
- walk_stackframe(&frame, save_trace, &data);
-}
-
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
- struct stack_trace_data data;
- struct stackframe frame;
-
- data.trace = trace;
- data.skip = trace->skip;
- data.no_sched_functions = 0;
-
- frame.fp = regs->ARM_fp;
- frame.sp = regs->ARM_sp;
- frame.lr = regs->ARM_lr;
- frame.pc = regs->ARM_pc;
-
- walk_stackframe(&frame, save_trace, &data);
-}
-
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
- __save_stack_trace(tsk, trace, 1);
-}
-EXPORT_SYMBOL(save_stack_trace_tsk);
-
-void save_stack_trace(struct stack_trace *trace)
-{
- __save_stack_trace(current, trace, 0);
+ for (;;) {
+ if (unwind_frame(&frame) < 0 ||
+ !consume_entry(cookie, frame.pc))
+ break;
+ }
}
-EXPORT_SYMBOL_GPL(save_stack_trace);
#endif