Message-Id: <20250528034712.138701-4-dongml2@chinatelecom.cn>
Date: Wed, 28 May 2025 11:46:50 +0800
From: Menglong Dong <menglong8.dong@...il.com>
To: alexei.starovoitov@...il.com,
rostedt@...dmis.org,
jolsa@...nel.org
Cc: bpf@...r.kernel.org,
Menglong Dong <dongml2@...natelecom.cn>,
linux-kernel@...r.kernel.org
Subject: [PATCH bpf-next 03/25] arm64: implement per-function metadata storage for arm64
Per-function metadata storage is already used by ftrace when
CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS is enabled: since commit
baaf553d3bc3 ("arm64: Implement HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS"),
the callback pointer is stored directly in the function padding,
consuming 8 bytes. So we can store the metadata index directly in the
function padding in the same way. With
CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS enabled, functions are 8-byte
aligned, and the kernel is compiled with an extra 8 bytes (2 NOPs) of
padding. Otherwise, functions are 4-byte aligned, and only an extra
4 bytes (1 NOP) of padding is needed.
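As an illustration, a minimal sketch (hypothetical helper, not part of
this patch) of how a function's metadata index can be read back from
its padding, using the KFUNC_MD_DATA_OFFSET introduced below:

  /* Hypothetical sketch: the index lives KFUNC_MD_DATA_OFFSET bytes
   * before the function entry. With CALL_OPS it sits above the
   * 2-instruction ops pointer, hence the larger offset.
   */
  static inline u32 kfunc_md_read_index(unsigned long ip)
  {
  	return *(u32 *)(ip - KFUNC_MD_DATA_OFFSET);
  }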
However, we have the same problem that Mark noted in the commit above:
the function padding can't be used together with CFI_CLANG, as clang
may then compute a wrong offset to the pre-function type hash. So we
fall back to the hash table mode for function metadata if CFI_CLANG is
enabled.
Signed-off-by: Menglong Dong <dongml2@...natelecom.cn>
---
arch/arm64/Kconfig | 21 ++++++++++++++++++++
arch/arm64/Makefile | 23 ++++++++++++++++++++--
arch/arm64/include/asm/ftrace.h | 34 +++++++++++++++++++++++++++++++++
arch/arm64/kernel/ftrace.c | 13 +++++++++++--
4 files changed, 87 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a182295e6f08..db504df07072 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1549,6 +1549,27 @@ config NODES_SHIFT
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
+config FUNCTION_METADATA
+ bool "Per-function metadata storage support"
+ default y
+ select HAVE_DYNAMIC_FTRACE_NO_PATCHABLE if !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
+ depends on !CFI_CLANG
+ help
+ Support function-padding-based per-function metadata storage for
+ kernel functions, which allows looking up a function's metadata
+ by its address with almost no overhead.
+
+ The metadata index is stored in the function padding and consumes
+ 4 bytes. If FUNCTION_ALIGNMENT_8B is enabled, an extra 8 bytes of
+ function padding is reserved at compile time. Otherwise, only an
+ extra 4 bytes of padding is needed.
+
+ If this option is not enabled, hash-table-based function metadata
+ is used instead.
+
+config FUNCTION_METADATA_PADDING
+ def_bool FUNCTION_METADATA
+
source "kernel/Kconfig.hz"
config ARCH_SPARSEMEM_ENABLE
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 1d5dfcd1c13e..576d6ab94dc5 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -144,12 +144,31 @@ endif
CHECKFLAGS += -D__aarch64__
+ifeq ($(CONFIG_FUNCTION_METADATA_PADDING),y)
+ ifeq ($(CONFIG_FUNCTION_ALIGNMENT_8B),y)
+ __padding_nops := 2
+ else
+ __padding_nops := 1
+ endif
+else
+ __padding_nops := 0
+endif
+
ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS),y)
+ __padding_nops := $(shell echo $(__padding_nops) + 2 | bc)
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
- CC_FLAGS_FTRACE := -fpatchable-function-entry=4,2
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=$(shell echo $(__padding_nops) + 2 | bc),$(__padding_nops)
else ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y)
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=$(shell echo $(__padding_nops) + 2 | bc),$(__padding_nops)
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
- CC_FLAGS_FTRACE := -fpatchable-function-entry=2
+else ifeq ($(CONFIG_FUNCTION_METADATA_PADDING),y)
+ CC_FLAGS_FTRACE += -fpatchable-function-entry=$(__padding_nops),$(__padding_nops)
+ ifneq ($(CONFIG_FUNCTION_TRACER),y)
+ KBUILD_CFLAGS += $(CC_FLAGS_FTRACE)
+ # some files need to remove this cflag when CONFIG_FUNCTION_TRACER
+ # is not enabled, so export it here
+ export CC_FLAGS_FTRACE
+ endif
endif
ifeq ($(CONFIG_KASAN_SW_TAGS), y)
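For reference, a few resulting flag combinations (assuming that
DYNAMIC_FTRACE_WITH_CALL_OPS selects FUNCTION_ALIGNMENT_8B, as it does
on arm64):

  CALL_OPS=y:                      __padding_nops = 4, -fpatchable-function-entry=6,4
  WITH_ARGS=y, 4-byte alignment:   __padding_nops = 1, -fpatchable-function-entry=3,1
  FUNCTION_METADATA_PADDING alone: __padding_nops = 1, -fpatchable-function-entry=1,1

Without FUNCTION_METADATA_PADDING these reduce to the previous 4,2 and
2,0 (i.e. "=2") values.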
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index bfe3ce9df197..9aafb3103829 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -24,6 +24,16 @@
#define FTRACE_PLT_IDX 0
#define NR_FTRACE_PLTS 1
+#ifdef CONFIG_FUNCTION_METADATA_PADDING
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+#define KFUNC_MD_DATA_OFFSET (AARCH64_INSN_SIZE * 3)
+#else
+#define KFUNC_MD_DATA_OFFSET AARCH64_INSN_SIZE
+#endif
+#define KFUNC_MD_INSN_SIZE AARCH64_INSN_SIZE
+#define KFUNC_MD_INSN_OFFSET KFUNC_MD_DATA_OFFSET
+#endif
+
/*
* Currently, gcc tends to save the link register after the local variables
* on the stack. This causes the max stack tracer to report the function
@@ -216,6 +226,30 @@ static inline bool arch_syscall_match_sym_name(const char *sym,
*/
return !strcmp(sym + 8, name);
}
+
+#ifdef CONFIG_FUNCTION_METADATA_PADDING
+#include <asm/text-patching.h>
+
+static inline bool kfunc_md_arch_exist(unsigned long ip, int insn_offset)
+{
+ return !aarch64_insn_is_nop(*(u32 *)(ip - insn_offset));
+}
+
+static inline void kfunc_md_arch_pretend(u8 *insn, u32 index)
+{
+ *(u32 *)insn = index;
+}
+
+static inline void kfunc_md_arch_nops(u8 *insn)
+{
+ *(u32 *)insn = aarch64_insn_gen_nop();
+}
+
+static inline int kfunc_md_arch_poke(void *ip, u8 *insn, int insn_offset)
+{
+ return aarch64_insn_patch_text_nosync(ip, *(u32 *)insn);
+}
+#endif
#endif /* ifndef __ASSEMBLY__ */
#ifndef __ASSEMBLY__
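As an aside, a hypothetical usage sketch of how these hooks fit
together (the real caller lives in the generic kfunc_md code later in
this series and is not shown in this patch; this sketch assumes 'slot'
already points at the padding, i.e. the function address minus
KFUNC_MD_INSN_OFFSET):

  /* Hypothetical sketch, not part of this patch. */
  static inline int kfunc_md_write_index(void *slot, u32 index)
  {
  	u8 insn[KFUNC_MD_INSN_SIZE];

  	/* encode the raw index so it can be patched in like an insn */
  	kfunc_md_arch_pretend(insn, index);
  	return kfunc_md_arch_poke(slot, insn, KFUNC_MD_INSN_OFFSET);
  }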
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 5a890714ee2e..869946dabdd0 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -88,8 +88,10 @@ unsigned long ftrace_call_adjust(unsigned long addr)
* to `BL <caller>`, which is at `addr + 4` bytes in either case.
*
*/
- if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
- return addr + AARCH64_INSN_SIZE;
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) {
+ addr += AARCH64_INSN_SIZE;
+ goto out;
+ }
/*
* When using patchable-function-entry with pre-function NOPs, addr is
@@ -139,6 +141,13 @@ unsigned long ftrace_call_adjust(unsigned long addr)
/* Skip the first NOP after function entry */
addr += AARCH64_INSN_SIZE;
+out:
+ if (IS_ENABLED(CONFIG_FUNCTION_METADATA_PADDING)) {
+ if (IS_ENABLED(CONFIG_FUNCTION_ALIGNMENT_8B))
+ addr += 2 * AARCH64_INSN_SIZE;
+ else
+ addr += AARCH64_INSN_SIZE;
+ }
return addr;
}
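As a worked example (assuming WITH_ARGS=y, CALL_OPS=n and 4-byte
alignment, i.e. -fpatchable-function-entry=3,1): addr from mcount_loc
points at the single pre-function metadata NOP, the early branch above
skips one instruction, and the new 'out' path skips one more, leaving
addr on the `BL <caller>` patch site at function entry + 4, exactly as
before.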
--
2.39.5