[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAMj1kXFL4abn9xg1ZrNpFg54Pmw1Kw8OPbDpMevSjQDNg0r5Pg@mail.gmail.com>
Date: Tue, 5 Apr 2022 12:01:19 +0200
From: Ard Biesheuvel <ardb@...nel.org>
To: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Cc: linux-kernel@...r.kernel.org, stable@...r.kernel.org,
"Steven Rostedt (Google)" <rostedt@...dmis.org>,
Sasha Levin <sashal@...nel.org>
Subject: Re: [PATCH 5.15 746/913] ARM: ftrace: avoid redundant loads or
clobbering IP
On Tue, 5 Apr 2022 at 11:54, Greg Kroah-Hartman
<gregkh@...uxfoundation.org> wrote:
>
> From: Ard Biesheuvel <ardb@...nel.org>
>
> [ Upstream commit d11967870815b5ab89843980e35aab616c97c463 ]
>
> Tweak the ftrace return paths to avoid redundant loads of SP, as well as
> unnecessary clobbering of IP.
>
> This also fixes the inconsistency of using MOV to perform a function
> return, which is sub-optimal on recent micro-architectures but more
> importantly, does not perform an interworking return, unlike compiler
> generated function returns in Thumb2 builds.
>
> Let's fix this by popping PC from the stack like most ordinary code
> does.
>
> Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
> Reviewed-by: Steven Rostedt (Google) <rostedt@...dmis.org>
> Signed-off-by: Sasha Levin <sashal@...nel.org>
Please drop all the 32-bit ARM patches authored by me from the stable
queues except the ones that have Fixes: tags. These are highly likely
to cause an explosion of regressions, and they should never have been
selected, as I don't remember anyone proposing these for stable.
> ---
> arch/arm/kernel/entry-ftrace.S | 51 +++++++++++++++-------------------
> 1 file changed, 22 insertions(+), 29 deletions(-)
>
> diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
> index f4886fb6e9ba..f33c171e3090 100644
> --- a/arch/arm/kernel/entry-ftrace.S
> +++ b/arch/arm/kernel/entry-ftrace.S
> @@ -22,10 +22,7 @@
> * mcount can be thought of as a function called in the middle of a subroutine
> * call. As such, it needs to be transparent for both the caller and the
> * callee: the original lr needs to be restored when leaving mcount, and no
> - * registers should be clobbered. (In the __gnu_mcount_nc implementation, we
> - * clobber the ip register. This is OK because the ARM calling convention
> - * allows it to be clobbered in subroutines and doesn't use it to hold
> - * parameters.)
> + * registers should be clobbered.
> *
> * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}"
> * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c).
> @@ -70,26 +67,25 @@
>
> .macro __ftrace_regs_caller
>
> - sub sp, sp, #8 @ space for PC and CPSR OLD_R0,
> + str lr, [sp, #-8]! @ store LR as PC and make space for CPSR/OLD_R0,
> @ OLD_R0 will overwrite previous LR
>
> - add ip, sp, #12 @ move in IP the value of SP as it was
> - @ before the push {lr} of the mcount mechanism
> + ldr lr, [sp, #8] @ get previous LR
>
> - str lr, [sp, #0] @ store LR instead of PC
> + str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR
>
> - ldr lr, [sp, #8] @ get previous LR
> + str lr, [sp, #-4]! @ store previous LR as LR
>
> - str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR
> + add lr, sp, #16 @ move in LR the value of SP as it was
> + @ before the push {lr} of the mcount mechanism
>
> - stmdb sp!, {ip, lr}
> - stmdb sp!, {r0-r11, lr}
> + push {r0-r11, ip, lr}
>
> @ stack content at this point:
> @ 0 4 48 52 56 60 64 68 72
> - @ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |
> + @ R0 | R1 | ... | IP | SP + 4 | previous LR | LR | PSR | OLD_R0 |
>
> - mov r3, sp @ struct pt_regs*
> + mov r3, sp @ struct pt_regs*
>
> ldr r2, =function_trace_op
> ldr r2, [r2] @ pointer to the current
> @@ -112,11 +108,9 @@ ftrace_graph_regs_call:
> #endif
>
> @ pop saved regs
> - ldmia sp!, {r0-r12} @ restore r0 through r12
> - ldr ip, [sp, #8] @ restore PC
> - ldr lr, [sp, #4] @ restore LR
> - ldr sp, [sp, #0] @ restore SP
> - mov pc, ip @ return
> + pop {r0-r11, ip, lr} @ restore r0 through r12
> + ldr lr, [sp], #4 @ restore LR
> + ldr pc, [sp], #12
> .endm
>
> #ifdef CONFIG_FUNCTION_GRAPH_TRACER
> @@ -132,11 +126,9 @@ ftrace_graph_regs_call:
> bl prepare_ftrace_return
>
> @ pop registers saved in ftrace_regs_caller
> - ldmia sp!, {r0-r12} @ restore r0 through r12
> - ldr ip, [sp, #8] @ restore PC
> - ldr lr, [sp, #4] @ restore LR
> - ldr sp, [sp, #0] @ restore SP
> - mov pc, ip @ return
> + pop {r0-r11, ip, lr} @ restore r0 through r12
> + ldr lr, [sp], #4 @ restore LR
> + ldr pc, [sp], #12
>
> .endm
> #endif
> @@ -202,16 +194,17 @@ ftrace_graph_call\suffix:
> .endm
>
> .macro mcount_exit
> - ldmia sp!, {r0-r3, ip, lr}
> - ret ip
> + ldmia sp!, {r0-r3}
> + ldr lr, [sp, #4]
> + ldr pc, [sp], #8
> .endm
>
> ENTRY(__gnu_mcount_nc)
> UNWIND(.fnstart)
> #ifdef CONFIG_DYNAMIC_FTRACE
> - mov ip, lr
> - ldmia sp!, {lr}
> - ret ip
> + push {lr}
> + ldr lr, [sp, #4]
> + ldr pc, [sp], #8
> #else
> __mcount
> #endif
> --
> 2.34.1
>
>
>
Powered by blists - more mailing lists