Message-ID: <166627907737.401.15310729251273334368.tip-bot2@tip-bot2>
Date: Thu, 20 Oct 2022 15:17:57 -0000
From: "tip-bot2 for Peter Zijlstra" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: "Peter Zijlstra (Intel)" <peterz@...radead.org>,
Mark Rutland <mark.rutland@....com>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: x86/urgent] ftrace,kcfi: Separate ftrace_stub() and ftrace_stub_graph()

The following commit has been merged into the x86/urgent branch of tip:

Commit-ID:     883bbbffa5a4ffd1915f8b42934dab81b7f87226
Gitweb:        https://git.kernel.org/tip/883bbbffa5a4ffd1915f8b42934dab81b7f87226
Author:        Peter Zijlstra <peterz@...radead.org>
AuthorDate:    Tue, 18 Oct 2022 13:49:21 +02:00
Committer:     Peter Zijlstra <peterz@...radead.org>
CommitterDate: Thu, 20 Oct 2022 17:10:27 +02:00

ftrace,kcfi: Separate ftrace_stub() and ftrace_stub_graph()

Different function signatures mean they need to be different
functions; otherwise CFI gets upset.

As triggered by the ftrace boot tests:
[] CFI failure at ftrace_return_to_handler+0xac/0x16c (target: ftrace_stub+0x0/0x14; expected type: 0x0a5d5347)
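
As a rough illustration of the mismatch (not kernel code; the prototypes are
simplified and the helper and variable names are made up for the example),
the situation looks like this:

	/*
	 * Illustrative sketch only: two callback types with different
	 * signatures.  Under kCFI, an indirect call site checks that the
	 * target carries the type hash of the pointer type used for the
	 * call, so one shared stub cannot legally stand in for both.
	 */
	struct ftrace_graph_ret;				/* opaque here */

	typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
	typedef void (*graph_ret_func_t)(struct ftrace_graph_ret *trace);

	static void stub_func(unsigned long ip, unsigned long parent_ip) { }
	static void stub_graph(struct ftrace_graph_ret *trace) { }

	static ftrace_func_t    trace_hook = stub_func;   /* hash: ftrace_func_t */
	static graph_ret_func_t graph_hook = stub_graph;  /* hash: graph_ret_func_t */

	void example_return_path(struct ftrace_graph_ret *trace)
	{
		/*
		 * A kCFI-checked indirect call: the target must have been
		 * built (or hand-annotated via SYM_TYPED_FUNC_START) with
		 * the graph_ret_func_t type hash.  Before this patch the
		 * graph hook could resolve, via a linker alias, to the asm
		 * ftrace_stub, whose hash did not match -- the failure above.
		 */
		graph_hook(trace);
	}
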
Fixes: 3c516f89e17e ("x86: Add support for CONFIG_CFI_CLANG")
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Reviewed-by: Mark Rutland <mark.rutland@....com>
Tested-by: Mark Rutland <mark.rutland@....com>
Link: https://lkml.kernel.org/r/Y06dg4e1xF6JTdQq@hirez.programming.kicks-ass.net
---
arch/arm64/kernel/entry-ftrace.S | 7 ++++++-
arch/x86/kernel/ftrace_64.S | 17 +++++++++--------
include/asm-generic/vmlinux.lds.h | 18 ++++++++++++------
3 files changed, 27 insertions(+), 15 deletions(-)
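
The "expected type" value in the splat is the kCFI type hash. Conceptually
(the real encoding is arch- and compiler-specific; the helper below is a
made-up illustration, not the actual mechanism), an instrumented indirect
call does something like:

	#include <stdint.h>

	typedef void (*callback_t)(void);

	/* Hypothetical helper: read the 32-bit type hash that the compiler
	 * (or SYM_TYPED_FUNC_START) placed just before the function entry. */
	static uint32_t kcfi_hash_of(callback_t fn)
	{
		return *(const uint32_t *)((uintptr_t)fn - sizeof(uint32_t));
	}

	static void kcfi_checked_call(callback_t fn, uint32_t expected_type)
	{
		if (kcfi_hash_of(fn) != expected_type)
			__builtin_trap();  /* reported as "CFI failure ... expected type: 0x..." */
		fn();
	}
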
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index bd5df50..795344a 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -7,6 +7,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/ftrace.h>
@@ -294,10 +295,14 @@ SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
-SYM_FUNC_START(ftrace_stub)
+SYM_TYPED_FUNC_START(ftrace_stub)
ret
SYM_FUNC_END(ftrace_stub)
+SYM_TYPED_FUNC_START(ftrace_stub_graph)
+ ret
+SYM_FUNC_END(ftrace_stub_graph)
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* void return_to_handler(void)
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index a90c55a..2a4be92 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -4,6 +4,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
#include <asm/export.h>
@@ -129,6 +130,14 @@
.endm
+SYM_TYPED_FUNC_START(ftrace_stub)
+ RET
+SYM_FUNC_END(ftrace_stub)
+
+SYM_TYPED_FUNC_START(ftrace_stub_graph)
+ RET
+SYM_FUNC_END(ftrace_stub_graph)
+
#ifdef CONFIG_DYNAMIC_FTRACE
SYM_FUNC_START(__fentry__)
@@ -176,11 +185,6 @@ SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL)
SYM_FUNC_END(ftrace_caller);
STACK_FRAME_NON_STANDARD_FP(ftrace_caller)
-SYM_FUNC_START(ftrace_stub)
- UNWIND_HINT_FUNC
- RET
-SYM_FUNC_END(ftrace_stub)
-
SYM_FUNC_START(ftrace_regs_caller)
/* Save the current flags before any operations that can change them */
pushfq
@@ -282,9 +286,6 @@ STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)
SYM_FUNC_START(__fentry__)
cmpq $ftrace_stub, ftrace_trace_function
jnz trace
-
-SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
- ENDBR
RET
trace:
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index c15de16..d06ada2 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -162,6 +162,16 @@
#define PATCHABLE_DISCARDS *(__patchable_function_entries)
#endif
+#ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG
+/*
+ * Simply points to ftrace_stub, but with the proper protocol.
+ * Defined by the linker script in linux/vmlinux.lds.h
+ */
+#define FTRACE_STUB_HACK ftrace_stub_graph = ftrace_stub;
+#else
+#define FTRACE_STUB_HACK
+#endif
+
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/*
* The ftrace call sites are logged to a section whose name depends on the
@@ -169,10 +179,6 @@
* FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
* dependencies for FTRACE_CALLSITE_SECTION's definition.
*
- * Need to also make ftrace_stub_graph point to ftrace_stub
- * so that the same stub location may have different protocols
- * and not mess up with C verifiers.
- *
* ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func
* as some archs will have a different prototype for that function
* but ftrace_ops_list_func() will have a single prototype.
@@ -182,11 +188,11 @@
KEEP(*(__mcount_loc)) \
KEEP_PATCHABLE \
__stop_mcount_loc = .; \
- ftrace_stub_graph = ftrace_stub; \
+ FTRACE_STUB_HACK \
ftrace_ops_list_func = arch_ftrace_ops_list_func;
#else
# ifdef CONFIG_FUNCTION_TRACER
-# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub; \
+# define MCOUNT_REC() FTRACE_STUB_HACK \
ftrace_ops_list_func = arch_ftrace_ops_list_func;
# else
# define MCOUNT_REC()
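
For context, the C side is assumed to consume the symbol roughly as below (a
simplified sketch, not quoted verbatim from the kernel sources): on
architectures that can support kCFI (CONFIG_ARCH_SUPPORTS_CFI_CLANG),
ftrace_stub_graph is now a real function carrying the right type hash (the
SYM_TYPED_FUNC_START definitions above), while everywhere else the
FTRACE_STUB_HACK linker-script assignment keeps providing it as a plain alias
of ftrace_stub.

	/* Simplified sketch of the consumer side (assumed, not verbatim). */
	struct ftrace_graph_ret;
	typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *);

	/* Provided either by arch asm (SYM_TYPED_FUNC_START, kCFI archs) or
	 * by the FTRACE_STUB_HACK linker-script alias (everything else). */
	extern void ftrace_stub_graph(struct ftrace_graph_ret *);

	/* Default graph-return hook; it is invoked indirectly from the
	 * return path, which is where the kCFI check fired. */
	trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
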