Message-ID: <aQIltqoIVDwh4A6p@J2N7QTR9R3>
Date: Wed, 29 Oct 2025 14:33:26 +0000
From: Mark Rutland <mark.rutland@....com>
To: Ben Niu <benniu@...a.com>
Cc: Catalin Marinas <catalin.marinas@....com>,
	Will Deacon <will@...nel.org>, tytso@....edu, Jason@...c4.com,
	Ben Niu <niuben003@...il.com>, linux-arm-kernel@...ts.infradead.org,
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH] tracing: Enable kprobe tracing for Arm64 asm functions

On Mon, Oct 27, 2025 at 11:17:49AM -0700, Ben Niu wrote:
> Currently, Arm64 assembly functions always have a bti c
> instruction inserted before the prologue, and even when ftrace is
> enabled, no padding nops are inserted at all.
> 
> This breaks kprobe tracing for asm functions, which assumes that
> proper nops are added before and within functions (when ftrace is
> enabled) and bti c is only present when CONFIG_ARM64_BTI_KERNEL is
> defined.
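> 
> For reference, with CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS the compiler
> lays out a C function roughly like this (a sketch; the exact patching
> is up to ftrace):
> 
> 	nop			// patched by ftrace to hold a pointer
> 	nop			// to this function's ftrace_ops
> func:
> 	bti	c		// only with CONFIG_ARM64_BTI_KERNEL
> 	nop			// patched to: mov x9, x30
> 	nop			// patched to: bl ftrace_caller
> 	<prologue>
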
What exactly do you mean by "breaks kprobe tracing"?

The kprobes code knows NOTHING about those ftrace NOPs, so I cannot see
how those are relevant.

The patch adds entries to __patchable_function_entries, which is owned
by ftrace, and has NOTHING to do with kprobes.
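
(For context: on arm64, a kprobe plants a BRK breakpoint at the probed
instruction and then single-steps or simulates the displaced
instruction, so it does not depend on any patchable NOPs being
present.)
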
> The patch fixes the bug by inserting nops and bti c in Arm64 asm
> functions in the same way the compiler does for C code.

As it stands, NAK to this change.

I'm not averse to making (some) assembly functions traceable by ftrace,
and hence giving them those NOPs. However, that's not generally safe
(e.g. due to noinstr requirements), and so special care will need to be
taken.

The rationale above does not make sense; it conflates distinct things,
and I think a more complete explanation is necessary.

Mark.

> Note: although this patch unblocks kprobe tracing, fentry is still
> broken because no BTF info gets generated from assembly files. A
> separate patch is needed to fix that.
> 
> I built this patch with different combos of the following features
> and confirmed that kprobe tracing for the asm function
> __arch_copy_to_user worked in all cases (example commands below):
> 
> CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
> CONFIG_DYNAMIC_FTRACE_WITH_ARGS
> CONFIG_ARM64_BTI_KERNEL
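> 
> (By "kprobe tracing" I mean the standard tracefs kprobe_events
> interface, e.g. roughly:
> 
> 	# echo 'p:cp __arch_copy_to_user' >> /sys/kernel/tracing/kprobe_events
> 	# echo 1 > /sys/kernel/tracing/events/kprobes/cp/enable
> 
> with hits then visible in /sys/kernel/tracing/trace; the event name
> "cp" here is arbitrary.)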
> 
> Signed-off-by: Ben Niu <benniu@...a.com>
> ---
>  arch/arm64/include/asm/linkage.h           | 103 ++++++++++++++++-----
>  arch/arm64/kernel/vdso/vgetrandom-chacha.S |   2 +-
>  2 files changed, 81 insertions(+), 24 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
> index d3acd9c87509..f3f3bc168162 100644
> --- a/arch/arm64/include/asm/linkage.h
> +++ b/arch/arm64/include/asm/linkage.h
> @@ -5,8 +5,47 @@
>  #include <asm/assembler.h>
>  #endif
>  
> -#define __ALIGN		.balign CONFIG_FUNCTION_ALIGNMENT
> -#define __ALIGN_STR	".balign " #CONFIG_FUNCTION_ALIGNMENT
> +#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT
> +#define __ALIGN_STR ".balign " #CONFIG_FUNCTION_ALIGNMENT
> +
> +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
> +
> +#define PRE_FUNCTION_NOPS                                                   \
> +	ALIGN;                                                              \
> +	nops CONFIG_FUNCTION_ALIGNMENT / 4 - 2;                             \
> +	.pushsection __patchable_function_entries, "awo", @progbits, .text; \
> +	.p2align 3;                                                         \
> +	.8byte 1f;                                                          \
> +	.popsection;                                                        \
> +	1 :;                                                                \
> +	nops 2;
> +
> +#define PRE_PROLOGUE_NOPS nops 2;
> +
> +#elif defined(CONFIG_DYNAMIC_FTRACE_WITH_ARGS)
> +
> +#define PRE_FUNCTION_NOPS
> +
> +#define PRE_PROLOGUE_NOPS                                                   \
> +	.pushsection __patchable_function_entries, "awo", @progbits, .text; \
> +	.p2align 3;                                                         \
> +	.8byte 1f;                                                          \
> +	.popsection;                                                        \
> +	1 :;                                                                \
> +	nops 2;
> +
> +#else
> +
> +#define PRE_FUNCTION_NOPS
> +#define PRE_PROLOGUE_NOPS
> +
> +#endif
> +
> +#ifdef CONFIG_ARM64_BTI_KERNEL
> +#define BTI_C bti c;
> +#else
> +#define BTI_C
> +#endif
>  
>  /*
>   * When using in-kernel BTI we need to ensure that PCS-conformant
> @@ -15,32 +54,50 @@
>   * everything, the override is done unconditionally so we're more
>   * likely to notice any drift from the overridden definitions.
>   */
> -#define SYM_FUNC_START(name)				\
> -	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)	\
> -	bti c ;
> +#define SYM_FUNC_START(name)                       \
> +	PRE_FUNCTION_NOPS                          \
> +	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \
> +	BTI_C                                      \
> +	PRE_PROLOGUE_NOPS
> +
> +#define SYM_FUNC_START_NOTRACE(name)               \
> +	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \
> +	BTI_C
>  
> -#define SYM_FUNC_START_NOALIGN(name)			\
> -	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)	\
> -	bti c ;
> +#define SYM_FUNC_START_NOALIGN(name)              \
> +	PRE_FUNCTION_NOPS                         \
> +	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) \
> +	BTI_C                                     \
> +	PRE_PROLOGUE_NOPS
>  
> -#define SYM_FUNC_START_LOCAL(name)			\
> -	SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)	\
> -	bti c ;
> +#define SYM_FUNC_START_LOCAL(name)                \
> +	PRE_FUNCTION_NOPS                         \
> +	SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) \
> +	BTI_C                                     \
> +	PRE_PROLOGUE_NOPS
>  
> -#define SYM_FUNC_START_LOCAL_NOALIGN(name)		\
> -	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)	\
> -	bti c ;
> +#define SYM_FUNC_START_LOCAL_NOALIGN(name)       \
> +	PRE_FUNCTION_NOPS                        \
> +	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) \
> +	BTI_C                                    \
> +	PRE_PROLOGUE_NOPS
>  
> -#define SYM_FUNC_START_WEAK(name)			\
> -	SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN)	\
> -	bti c ;
> +#define SYM_FUNC_START_WEAK(name)                \
> +	PRE_FUNCTION_NOPS                        \
> +	SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN) \
> +	BTI_C                                    \
> +	PRE_PROLOGUE_NOPS
>  
> -#define SYM_FUNC_START_WEAK_NOALIGN(name)		\
> -	SYM_START(name, SYM_L_WEAK, SYM_A_NONE)		\
> -	bti c ;
> +#define SYM_FUNC_START_WEAK_NOALIGN(name)       \
> +	PRE_FUNCTION_NOPS                       \
> +	SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \
> +	BTI_C                                   \
> +	PRE_PROLOGUE_NOPS
>  
> -#define SYM_TYPED_FUNC_START(name)				\
> -	SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)	\
> -	bti c ;
> +#define SYM_TYPED_FUNC_START(name)                       \
> +	PRE_FUNCTION_NOPS                                \
> +	SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \
> +	BTI_C                                            \
> +	PRE_PROLOGUE_NOPS
>  
>  #endif
> diff --git a/arch/arm64/kernel/vdso/vgetrandom-chacha.S b/arch/arm64/kernel/vdso/vgetrandom-chacha.S
> index 67890b445309..21c27b64cf9f 100644
> --- a/arch/arm64/kernel/vdso/vgetrandom-chacha.S
> +++ b/arch/arm64/kernel/vdso/vgetrandom-chacha.S
> @@ -40,7 +40,7 @@
>   *	x2: 8-byte counter input/output
>   *	x3: number of 64-byte block to write to output
>   */
> -SYM_FUNC_START(__arch_chacha20_blocks_nostack)
> +SYM_FUNC_START_NOTRACE(__arch_chacha20_blocks_nostack)
>  
>  	/* copy0 = "expand 32-byte k" */
>  	mov_q		x8, 0x3320646e61707865
> -- 
> 2.47.3
> 
> 