lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <3c8e08d5-1069-4df8-5f53-5bd5c6aedd75@sangfor.com.cn>
Date:   Mon, 27 Mar 2023 11:03:39 +0800
From:   Donglin Peng <pengdonglin@...gfor.com.cn>
To:     Mark Rutland <mark.rutland@....com>
Cc:     mhiramat@...nel.org, rostedt@...dmis.org, linux@...linux.org.uk,
        will@...nel.org, catalin.marinas@....com, palmer@...belt.com,
        paul.walmsley@...ive.com, tglx@...utronix.de,
        dave.hansen@...ux.intel.com, x86@...nel.org, hpa@...or.com,
        chenhuacai@...nel.org, zhangqing@...ngson.cn, kernel@...0n.name,
        mingo@...hat.com, peterz@...radead.org, xiehuan09@...il.com,
        dinghui@...gfor.com.cn, huangcun@...gfor.com.cn,
        dolinux.peng@...il.com, linux-trace-kernel@...r.kernel.org,
        loongarch@...ts.linux.dev, linux-riscv@...ts.infradead.org,
        linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v7 4/8] arm64: ftrace: Enable HAVE_FUNCTION_GRAPH_RETVAL

On 2023/3/24 20:57, Mark Rutland wrote:
> On Fri, Mar 24, 2023 at 05:37:27AM -0700, Donglin Peng wrote:
>> The commit d4815c5d1bbd ("function_graph: Support recording and
>> printing the return value of function") laid the groundwork for the
>> funcgraph-retval, and this modification makes it available
>> on the ARM64 platform.
>>
>> We introduce a new structure called fgraph_ret_regs for the ARM64
>> platform to hold return registers and the frame pointer. We then
>> fill its content in the return_to_handler and pass its address to
>> the function ftrace_return_to_handler to record the return value.
> 
> I'm happy with this, or with using ftrace_regs and capturing more regs here.

Thanks, I think ftrace_regs has several members that are unnecessary to
fill. And each function that can be traced will do this work when using
function graph tracer, so the overhead is a bit high.

> 
> This overall looks good, but there's one functional issue and a couple of minor
> nits which I've detailed below.

Thanks.

> 
>>
>> Signed-off-by: Donglin Peng <pengdonglin@...gfor.com.cn>
>> ---
>>   arch/arm64/Kconfig               |  1 +
>>   arch/arm64/include/asm/ftrace.h  | 23 +++++++++++++++++++++++
>>   arch/arm64/kernel/entry-ftrace.S |  9 +++++----
>>   3 files changed, 29 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
>> index 1023e896d46b..48856d230800 100644
>> --- a/arch/arm64/Kconfig
>> +++ b/arch/arm64/Kconfig
>> @@ -195,6 +195,7 @@ config ARM64
>>   	select HAVE_FTRACE_MCOUNT_RECORD
>>   	select HAVE_FUNCTION_TRACER
>>   	select HAVE_FUNCTION_ERROR_INJECTION
>> +	select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
>>   	select HAVE_FUNCTION_GRAPH_TRACER
>>   	select HAVE_GCC_PLUGINS
>>   	select HAVE_HW_BREAKPOINT if PERF_EVENTS
>> diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
>> index 1c2672bbbf37..f68dcc41be3b 100644
>> --- a/arch/arm64/include/asm/ftrace.h
>> +++ b/arch/arm64/include/asm/ftrace.h
>> @@ -170,4 +170,27 @@ static inline bool arch_syscall_match_sym_name(const char *sym,
>>   }
>>   #endif /* ifndef __ASSEMBLY__ */
>>   
>> +#ifndef __ASSEMBLY__
>> +
>> +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
>> +struct fgraph_ret_regs {
>> +	/* x0 - x7 */
>> +	u64 regs[8];
>> +
>> +	u64 fp;
>> +};
> 
> As a minor nit, for ftrace_regs we used `unsigned long` rather than `u64`;
> could we do the same here for consistency?

Yes, I will fix it.

> 
> This will need to be padded to 16 bytes, as within the kernel, arm64 requires
> the SP to be aligned to 16 bytes at all times. Please can you add an `__unused`
> field, like we have in ftrace_regs, to ensure that?
> 

Yes, I will fix it.

>> +
>> +static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
>> +{
>> +	return ret_regs->regs[0];
>> +}
>> +
>> +static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
>> +{
>> +	return ret_regs->fp;
>> +}
>> +#endif
>> +
>> +#endif
>> +
>>   #endif /* __ASM_FTRACE_H */
>> diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
>> index 350ed81324ac..8ac6f952e68f 100644
>> --- a/arch/arm64/kernel/entry-ftrace.S
>> +++ b/arch/arm64/kernel/entry-ftrace.S
>> @@ -270,14 +270,15 @@ SYM_FUNC_END(ftrace_stub_graph)
>>    */
>>   SYM_CODE_START(return_to_handler)
>>   	/* save return value regs */
>> -	sub sp, sp, #64
>> +	sub sp, sp, #72
>>   	stp x0, x1, [sp]
>>   	stp x2, x3, [sp, #16]
>>   	stp x4, x5, [sp, #32]
>>   	stp x6, x7, [sp, #48]
>> +	str x29,    [sp, #64]		//     parent's fp
> 
> As above, this will need to be padded to keep the stack aligned to 16 bytes,
> and I'd prefer if we could use asm-offsets so that we can have something like:
> 
> 	sub	sp, sp, #FRET_REGS_SIZE
> 	stp	x0, x1, [sp, #FRET_REGS_X0]
> 	stp	x2, x3, [sp, #FRET_REGS_X2]
> 	stp	x4, x5, [sp, #FRET_REGS_X4]
> 	stp	x6, x7, [sp, #FRET_REGS_X6]
> 	str	x29, [sp, FRET_REGS_FP]

Thank you, I will fix it.

> 
>>   
>> -	mov	x0, x29			//     parent's fp
>> -	bl	ftrace_return_to_handler// addr = ftrace_return_to_handler(fp);
>> +	mov	x0, sp
>> +	bl	ftrace_return_to_handler// addr = ftrace_return_to_handler(regs);
>>   	mov	x30, x0			// restore the original return address
>>   
>>   	/* restore return value regs */
>> @@ -285,7 +286,7 @@ SYM_CODE_START(return_to_handler)
>>   	ldp x2, x3, [sp, #16]
>>   	ldp x4, x5, [sp, #32]
>>   	ldp x6, x7, [sp, #48]
>> -	add sp, sp, #64
>> +	add sp, sp, #72
> 
> Likewise here.

Thank you, I will fix it.

> 
> Other than that, this looks good to me, thanks for respinning!
> 
> Thanks,
> Mark.

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ