lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <alpine.LSU.2.21.2007161342290.3958@pobox.suse.cz>
Date:   Thu, 16 Jul 2020 13:56:13 +0200 (CEST)
From:   Miroslav Benes <mbenes@...e.cz>
To:     Mark Brown <broonie@...nel.org>
cc:     Catalin Marinas <catalin.marinas@....com>,
        Will Deacon <will@...nel.org>,
        Heiko Carstens <hca@...ux.ibm.com>,
        Vasily Gorbik <gor@...ux.ibm.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Borislav Petkov <bp@...en8.de>,
        "H. Peter Anvin" <hpa@...or.com>,
        Christian Borntraeger <borntraeger@...ibm.com>,
        Ingo Molnar <mingo@...nel.org>,
        Jiri Slaby <jirislaby@...nel.org>, x86@...nel.org,
        linux-arm-kernel@...ts.infradead.org, linux-s390@...r.kernel.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH 3/3] arm64: stacktrace: Convert to ARCH_STACKWALK

Hi,

On Wed, 15 Jul 2020, Mark Brown wrote:

> Historically architectures have had duplicated code in their stack trace
> implementations for filtering what gets traced. In order to avoid this
> duplication some generic code has been provided using a new interface
> arch_stack_walk(), enabled by selecting ARCH_STACKWALK in Kconfig, which
> factors all this out into the generic stack trace code. Convert arm64
> to use this common infrastructure.
> 
> Signed-off-by: Mark Brown <broonie@...nel.org>
> ---
>  arch/arm64/Kconfig             |  1 +
>  arch/arm64/kernel/stacktrace.c | 79 ++++------------------------------
>  2 files changed, 9 insertions(+), 71 deletions(-)
> 
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 5d4f02b3dfe9..6ed4b6c6df95 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -29,6 +29,7 @@ config ARM64
>  	select ARCH_HAS_SETUP_DMA_OPS
>  	select ARCH_HAS_SET_DIRECT_MAP
>  	select ARCH_HAS_SET_MEMORY
> +	select ARCH_STACKWALK
>  	select ARCH_HAS_STRICT_KERNEL_RWX
>  	select ARCH_HAS_STRICT_MODULE_RWX
>  	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
> diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
> index 743cf11fbfca..a33fba048954 100644
> --- a/arch/arm64/kernel/stacktrace.c
> +++ b/arch/arm64/kernel/stacktrace.c
> @@ -133,82 +133,19 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
>  NOKPROBE_SYMBOL(walk_stackframe);
>  
>  #ifdef CONFIG_STACKTRACE
> -struct stack_trace_data {
> -	struct stack_trace *trace;
> -	unsigned int no_sched_functions;
> -	unsigned int skip;
> -};
>  
> -static bool save_trace(void *d, unsigned long addr)
> +void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
> +		     struct task_struct *task, struct pt_regs *regs)
>  {
> -	struct stack_trace_data *data = d;
> -	struct stack_trace *trace = data->trace;
> -
> -	if (data->no_sched_functions && in_sched_functions(addr))
> -		return false;
> -	if (data->skip) {
> -		data->skip--;
> -		return false;
> -	}
> -
> -	trace->entries[trace->nr_entries++] = addr;
> -
> -	return trace->nr_entries >= trace->max_entries;
> -}
> -
> -void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
> -{
> -	struct stack_trace_data data;
> -	struct stackframe frame;
> -
> -	data.trace = trace;
> -	data.skip = trace->skip;
> -	data.no_sched_functions = 0;
> -
> -	start_backtrace(&frame, regs->regs[29], regs->pc);
> -	walk_stackframe(current, &frame, save_trace, &data);
> -}
> -EXPORT_SYMBOL_GPL(save_stack_trace_regs);
> -
> -static noinline void __save_stack_trace(struct task_struct *tsk,
> -	struct stack_trace *trace, unsigned int nosched)
> -{
> -	struct stack_trace_data data;
>  	struct stackframe frame;
>  
> -	if (!try_get_task_stack(tsk))
> -		return;
> +	if (regs)
> +		start_backtrace(&frame, regs->regs[29], regs->pc);
> +	else
> +		start_backtrace(&frame, thread_saved_fp(task),
> +				thread_saved_pc(task));
>  
> -	data.trace = trace;
> -	data.skip = trace->skip;
> -	data.no_sched_functions = nosched;
> -
> -	if (tsk != current) {
> -		start_backtrace(&frame, thread_saved_fp(tsk),
> -				thread_saved_pc(tsk));
> -	} else {
> -		/* We don't want this function nor the caller */
> -		data.skip += 2;
> -		start_backtrace(&frame,
> -				(unsigned long)__builtin_frame_address(0),
> -				(unsigned long)__save_stack_trace);
> -	}
> -
> -	walk_stackframe(tsk, &frame, save_trace, &data);
> -
> -	put_task_stack(tsk);
> -}
> -
> -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
> -{
> -	__save_stack_trace(tsk, trace, 1);
> -}
> -EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
> -
> -void save_stack_trace(struct stack_trace *trace)
> -{
> -	__save_stack_trace(current, trace, 0);
> +	walk_stackframe(task, &frame, consume_entry, cookie);
>  }

just an idea for further improvement (and it might be a matter of taste). 
Wouldn't it be slightly better to do one more step and define "struct 
unwind_state" instead of "struct stackframe" and also some iterator for 
the unwinding and use that right in the new arch_stack_walk() instead of 
walk_stackframe()? I mean, take the unbounded loop, "inline" it to 
arch_stack_walk() and replace the loop with the iterator. The body of the 
iterator would call unwind_frame() and consume_entry(), and that's it. 
It would make arm64 implementation very similar to x86 and s390 and thus 
easier to follow when one switches between architectures all the time.

Tangential to this patch, but another idea for improvement is in 
unwind_frame(). If I am not missing something, everything in 
CONFIG_FUNCTION_GRAPH_TRACER could be replaced by a simple call to 
ftrace_graph_ret_addr(). Again, see for example unwind_next_frame() in
arch/s390/kernel/unwind_bc.c (x86 has it too).

Regards
Miroslav

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ