Message-ID: <20220713110020.85511-6-lihuafei1@huawei.com>
Date: Wed, 13 Jul 2022 19:00:20 +0800
From: Li Huafei <lihuafei1@...wei.com>
To: <linux@...linux.org.uk>, <rmk+kernel@...linux.org.uk>,
<ardb@...nel.org>, <will@...nel.org>, <broonie@...nel.org>
CC: <mark.rutland@....com>, <peterz@...radead.org>, <mingo@...hat.com>,
<acme@...nel.org>, <alexander.shishkin@...ux.intel.com>,
<jolsa@...nel.org>, <namhyung@...nel.org>, <arnd@...db.de>,
<linus.walleij@...aro.org>, <rostedt@...dmis.org>,
<nick.hawkins@....com>, <john@...ozen.org>, <mhiramat@...nel.org>,
<ast@...nel.org>, <linyujun809@...wei.com>,
<ndesaulniers@...gle.com>, <lihuafei1@...wei.com>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-kernel@...r.kernel.org>, <linux-perf-users@...r.kernel.org>
Subject: [PATCH v2 5/5] ARM: stacktrace: Convert stacktrace to generic ARCH_STACKWALK

Historically, architectures have had duplicated code in their stack trace
implementations for filtering what gets traced. To avoid this duplication,
generic code is now provided through a new interface, arch_stack_walk(),
enabled by selecting ARCH_STACKWALK in Kconfig, which factors all of this
filtering out into the common stack trace code. Convert ARM to use this
common infrastructure.
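
For reference, the generic interface that ARCH_STACKWALK architectures
implement looks roughly like this (a simplified sketch based on
include/linux/stacktrace.h and kernel/stacktrace.c, not a verbatim copy);
the filtering of scheduler functions now lives in the common consume
callbacks rather than in each architecture:

/* Per-entry callback supplied by the generic stack trace code. */
typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr);

/* The only hook an ARCH_STACKWALK architecture has to provide. */
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
                     struct task_struct *task, struct pt_regs *regs);

/*
 * Simplified sketch of the common "nosched" callback: scheduler
 * functions are dropped here, in generic code, so the arch walker
 * no longer needs a no_sched_functions flag of its own.
 */
static bool stack_trace_consume_entry_nosched(void *cookie, unsigned long addr)
{
        if (in_sched_functions(addr))
                return true;
        return stack_trace_consume_entry(cookie, addr);
}
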
When initializing the stack frame of the current task, arm64 uses
__builtin_frame_address(1) to initialize the frame pointer, skipping
arch_stack_walk() itself; see commit c607ab4f916d ("arm64: stacktrace:
don't trace arch_stack_walk()"). Since __builtin_frame_address(1) does
not work on ARM, unwind_frame() is used instead to unwind the stack by
one frame before calling walk_stackframe(), achieving the same effect.
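
With this in place, ARM no longer needs its own save_stack_trace*()
implementations; callers use the generic helpers, which end up in
arch_stack_walk(). A minimal usage sketch (the entries[] buffer size of
16 is arbitrary):

        unsigned long entries[16];
        unsigned int nr;

        /* Walk the current task's stack via arch_stack_walk(). */
        nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        stack_trace_print(entries, nr, 0);
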
Signed-off-by: Li Huafei <lihuafei1@...wei.com>
---
arch/arm/Kconfig | 1 +
arch/arm/kernel/stacktrace.c | 114 ++++++++++-------------------------
2 files changed, 33 insertions(+), 82 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 7630ba9cb6cc..8da192853562 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -18,6 +18,7 @@ config ARM
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SET_MEMORY
+ select ARCH_STACKWALK
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB || !MMU
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index ec0ca3192775..1b9c91e14d2e 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -136,98 +136,48 @@ void notrace walk_stackframe(struct stackframe *frame,
EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
-struct stack_trace_data {
- struct stack_trace *trace;
- unsigned int no_sched_functions;
- unsigned int skip;
-};
-
-static bool save_trace(void *d, unsigned long addr)
-{
- struct stack_trace_data *data = d;
- struct stack_trace *trace = data->trace;
-
- if (data->no_sched_functions && in_sched_functions(addr))
- return true;
- if (data->skip) {
- data->skip--;
- return true;
- }
-
- trace->entries[trace->nr_entries++] = addr;
- return trace->nr_entries < trace->max_entries;
-}
-
-/* This must be noinline to so that our skip calculation works correctly */
-static noinline void __save_stack_trace(struct task_struct *tsk,
- struct stack_trace *trace, unsigned int nosched)
+static void start_stack_trace(struct stackframe *frame, struct task_struct *task,
+ unsigned long fp, unsigned long sp,
+ unsigned long lr, unsigned long pc)
{
- struct stack_trace_data data;
- struct stackframe frame;
-
- data.trace = trace;
- data.skip = trace->skip;
- data.no_sched_functions = nosched;
-
- if (tsk != current) {
- /* task blocked in __switch_to */
- frame.fp = thread_saved_fp(tsk);
- frame.sp = thread_saved_sp(tsk);
- frame.lr = 0; /* recovered from the stack */
- frame.pc = thread_saved_pc(tsk);
- } else {
- /* We don't want this function nor the caller */
- data.skip += 2;
- frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_stack_pointer;
- frame.lr = (unsigned long)__builtin_return_address(0);
-here:
- frame.pc = (unsigned long)&&here;
- }
+ frame->fp = fp;
+ frame->sp = sp;
+ frame->lr = lr;
+ frame->pc = pc;
#ifdef CONFIG_KRETPROBES
- frame.kr_cur = NULL;
- frame.tsk = tsk;
+ frame->kr_cur = NULL;
+ frame->tsk = task;
#endif
#ifdef CONFIG_UNWINDER_FRAME_POINTER
- frame.ex_frame = false;
+ frame->ex_frame = in_entry_text(frame->pc) ? true : false;
#endif
-
- walk_stackframe(&frame, save_trace, &data);
}
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
{
- struct stack_trace_data data;
struct stackframe frame;
- data.trace = trace;
- data.skip = trace->skip;
- data.no_sched_functions = 0;
-
- frame.fp = regs->ARM_fp;
- frame.sp = regs->ARM_sp;
- frame.lr = regs->ARM_lr;
- frame.pc = regs->ARM_pc;
-#ifdef CONFIG_KRETPROBES
- frame.kr_cur = NULL;
- frame.tsk = current;
-#endif
-#ifdef CONFIG_UNWINDER_FRAME_POINTER
- frame.ex_frame = in_entry_text(frame.pc) ? true : false;
-#endif
-
- walk_stackframe(&frame, save_trace, &data);
-}
-
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
- __save_stack_trace(tsk, trace, 1);
-}
-EXPORT_SYMBOL(save_stack_trace_tsk);
+ if (regs) {
+ start_stack_trace(&frame, NULL, regs->ARM_fp, regs->ARM_sp,
+ regs->ARM_lr, regs->ARM_pc);
+ } else if (task != current) {
+ /* task blocked in __switch_to */
+ start_stack_trace(&frame, task, thread_saved_fp(task),
+ thread_saved_sp(task), 0,
+ thread_saved_pc(task));
+ } else {
+here:
+ start_stack_trace(&frame, task,
+ (unsigned long)__builtin_frame_address(0),
+ current_stack_pointer,
+ (unsigned long)__builtin_return_address(0),
+ (unsigned long)&&here);
+ /* skip this function */
+ if (unwind_frame(&frame))
+ return;
+ }
-void save_stack_trace(struct stack_trace *trace)
-{
- __save_stack_trace(current, trace, 0);
+ walk_stackframe(&frame, consume_entry, cookie);
}
-EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
--
2.17.1