Message-ID: <CAMj1kXG9ibOZfo60_pjwqACWhfPt8=38MDJD8C_CBoLrTYmCOw@mail.gmail.com>
Date: Thu, 14 Apr 2022 15:25:29 +0200
From: Ard Biesheuvel <ardb@...nel.org>
To: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Cc: Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
"# 3.4.x" <stable@...r.kernel.org>,
"Steven Rostedt (Google)" <rostedt@...dmis.org>,
Sasha Levin <sashal@...nel.org>
Subject: Re: [PATCH 4.19 208/338] ARM: ftrace: avoid redundant loads or clobbering IP
On Thu, 14 Apr 2022 at 15:23, Greg Kroah-Hartman
<gregkh@...uxfoundation.org> wrote:
>
> From: Ard Biesheuvel <ardb@...nel.org>
>
> [ Upstream commit d11967870815b5ab89843980e35aab616c97c463 ]
>
NAK. Please don't backport these patches to -stable; I thought I had
been clear on this.
> Tweak the ftrace return paths to avoid redundant loads of SP, as well as
> unnecessary clobbering of IP.
>
> This also fixes the inconsistency of using MOV to perform a function
> return, which is sub-optimal on recent micro-architectures but, more
> importantly, does not perform an interworking return, unlike
> compiler-generated function returns in Thumb2 builds.
>
> Let's fix this by popping PC from the stack like most ordinary code
> does.
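
(Aside, for readers unfamiliar with interworking: in a Thumb2 build, a
register MOV to PC does not switch between ARM and Thumb state, whereas
a load to PC honours bit 0 of the loaded address and switches state
accordingly. A minimal illustration, not taken from the patch:

    @ non-interworking return: execution stays in the current
    @ instruction set regardless of bit 0 of ip
    mov     pc, ip

    @ interworking return: a load to PC switches instruction set
    @ based on bit 0 of the loaded value, matching the behaviour of
    @ compiler-generated "pop {..., pc}" returns in Thumb2 builds
    ldr     pc, [sp], #4    @ pop return address and branch to it

Hence the patch ends the return paths with load-to-PC instructions
instead of "mov pc, ip".)
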
>
> Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
> Reviewed-by: Steven Rostedt (Google) <rostedt@...dmis.org>
> Signed-off-by: Sasha Levin <sashal@...nel.org>
> ---
> arch/arm/kernel/entry-ftrace.S | 51 +++++++++++++++-------------------
> 1 file changed, 22 insertions(+), 29 deletions(-)
>
> diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
> index 1acf4d05e94c..393c342ecd51 100644
> --- a/arch/arm/kernel/entry-ftrace.S
> +++ b/arch/arm/kernel/entry-ftrace.S
> @@ -41,10 +41,7 @@
> * mcount can be thought of as a function called in the middle of a subroutine
> * call. As such, it needs to be transparent for both the caller and the
> * callee: the original lr needs to be restored when leaving mcount, and no
> - * registers should be clobbered. (In the __gnu_mcount_nc implementation, we
> - * clobber the ip register. This is OK because the ARM calling convention
> - * allows it to be clobbered in subroutines and doesn't use it to hold
> - * parameters.)
> + * registers should be clobbered.
> *
> * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
> * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
> @@ -96,26 +93,25 @@
>
> .macro __ftrace_regs_caller
>
> - sub sp, sp, #8 @ space for PC and CPSR OLD_R0,
> + str lr, [sp, #-8]! @ store LR as PC and make space for CPSR/OLD_R0,
> @ OLD_R0 will overwrite previous LR
>
> - add ip, sp, #12 @ move in IP the value of SP as it was
> - @ before the push {lr} of the mcount mechanism
> + ldr lr, [sp, #8] @ get previous LR
>
> - str lr, [sp, #0] @ store LR instead of PC
> + str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR
>
> - ldr lr, [sp, #8] @ get previous LR
> + str lr, [sp, #-4]! @ store previous LR as LR
>
> - str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR
> + add lr, sp, #16 @ move in LR the value of SP as it was
> + @ before the push {lr} of the mcount mechanism
>
> - stmdb sp!, {ip, lr}
> - stmdb sp!, {r0-r11, lr}
> + push {r0-r11, ip, lr}
>
> @ stack content at this point:
> @ 0 4 48 52 56 60 64 68 72
> - @ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |
> + @ R0 | R1 | ... | IP | SP + 4 | previous LR | LR | PSR | OLD_R0 |
>
> - mov r3, sp @ struct pt_regs*
> + mov r3, sp @ struct pt_regs*
>
> ldr r2, =function_trace_op
> ldr r2, [r2] @ pointer to the current
> @@ -138,11 +134,9 @@ ftrace_graph_regs_call:
> #endif
>
> @ pop saved regs
> - ldmia sp!, {r0-r12} @ restore r0 through r12
> - ldr ip, [sp, #8] @ restore PC
> - ldr lr, [sp, #4] @ restore LR
> - ldr sp, [sp, #0] @ restore SP
> - mov pc, ip @ return
> + pop {r0-r11, ip, lr} @ restore r0 through r12
> + ldr lr, [sp], #4 @ restore LR
> + ldr pc, [sp], #12
> .endm
>
> #ifdef CONFIG_FUNCTION_GRAPH_TRACER
> @@ -158,11 +152,9 @@ ftrace_graph_regs_call:
> bl prepare_ftrace_return
>
> @ pop registers saved in ftrace_regs_caller
> - ldmia sp!, {r0-r12} @ restore r0 through r12
> - ldr ip, [sp, #8] @ restore PC
> - ldr lr, [sp, #4] @ restore LR
> - ldr sp, [sp, #0] @ restore SP
> - mov pc, ip @ return
> + pop {r0-r11, ip, lr} @ restore r0 through r12
> + ldr lr, [sp], #4 @ restore LR
> + ldr pc, [sp], #12
>
> .endm
> #endif
> @@ -273,16 +265,17 @@ ENDPROC(ftrace_graph_caller_old)
> .endm
>
> .macro mcount_exit
> - ldmia sp!, {r0-r3, ip, lr}
> - ret ip
> + ldmia sp!, {r0-r3}
> + ldr lr, [sp, #4]
> + ldr pc, [sp], #8
> .endm
>
> ENTRY(__gnu_mcount_nc)
> UNWIND(.fnstart)
> #ifdef CONFIG_DYNAMIC_FTRACE
> - mov ip, lr
> - ldmia sp!, {lr}
> - ret ip
> + push {lr}
> + ldr lr, [sp, #4]
> + ldr pc, [sp], #8
> #else
> __mcount
> #endif
> --
> 2.34.1
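
(A note on the "ldr pc, [sp], #N" form used in the new return paths: it
is a post-indexed load, so a single instruction loads the return address
into PC and then adds N to SP, combining the return with the remaining
stack cleanup. A standalone sketch, not taken from the patch:

    @ on entry: [sp] = return address, followed by 8 more bytes of
    @ saved state to discard (the layout here is hypothetical)
    ldr     pc, [sp], #12   @ return and drop all 12 bytes at once

Being a load to PC, this is also an interworking return.)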