Message-ID: <169222080821.27769.5002641830063782263.tip-bot2@tip-bot2>
Date: Wed, 16 Aug 2023 21:20:08 -0000
From: "tip-bot2 for Peter Zijlstra" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: "Peter Zijlstra (Intel)" <peterz@...radead.org>,
"Borislav Petkov (AMD)" <bp@...en8.de>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: x86/urgent] x86/cpu: Cleanup the untrain mess
The following commit has been merged into the x86/urgent branch of tip:
Commit-ID: e7c25c441e9e0fa75b4c83e0b26306b702cfe90d
Gitweb: https://git.kernel.org/tip/e7c25c441e9e0fa75b4c83e0b26306b702cfe90d
Author: Peter Zijlstra <peterz@...radead.org>
AuthorDate: Mon, 14 Aug 2023 13:44:34 +02:00
Committer: Borislav Petkov (AMD) <bp@...en8.de>
CommitterDate: Wed, 16 Aug 2023 21:58:59 +02:00
x86/cpu: Cleanup the untrain mess
Since there can only be one active return_thunk, there only needs to be
one (matching) untrain_ret. It fundamentally doesn't make sense to
allow multiple untrain_ret implementations at the same time.

Fold all three untrain methods into a single (temporary) helper stub.
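
For readers less familiar with the alternatives machinery, a rough
user-space C analogy of the resulting dispatch (purely illustrative:
the function names, the feature enum and the runtime switch below are
hypothetical stand-ins; the kernel instead patches the single call
site at boot via ALTERNATIVE_2, so exactly one sequence ever runs):

#include <stdio.h>

/* Stand-ins for the three real untrain sequences. */
static void retbleed_untrain(void)   { puts("retbleed untrain"); }
static void srso_untrain(void)       { puts("srso untrain"); }
static void srso_alias_untrain(void) { puts("srso alias untrain"); }

/* Hypothetical feature selector; the real kernel checks CPU feature
 * bits (X86_FEATURE_SRSO, X86_FEATURE_SRSO_ALIAS) at patch time. */
enum feature { FEAT_UNRET, FEAT_SRSO, FEAT_SRSO_ALIAS };

/* Single helper stub, analogous to entry_untrain_ret: every call site
 * funnels through one dispatch point instead of carrying three
 * separate alternatives. */
static void entry_untrain(enum feature f)
{
	switch (f) {
	case FEAT_SRSO:		srso_untrain(); break;
	case FEAT_SRSO_ALIAS:	srso_alias_untrain(); break;
	default:		retbleed_untrain(); break;
	}
}

int main(void)
{
	entry_untrain(FEAT_SRSO);	/* exactly one method runs */
	return 0;
}
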
Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Signed-off-by: Borislav Petkov (AMD) <bp@...en8.de>
Link: https://lore.kernel.org/r/20230814121149.042774962@infradead.org
---
arch/x86/include/asm/nospec-branch.h | 19 +++++--------------
arch/x86/kernel/cpu/bugs.c | 1 +
arch/x86/lib/retpoline.S | 7 +++++++
3 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index f7c3375..5285c8e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -272,9 +272,9 @@
.endm
#ifdef CONFIG_CPU_UNRET_ENTRY
-#define CALL_ZEN_UNTRAIN_RET "call retbleed_untrain_ret"
+#define CALL_UNTRAIN_RET "call entry_untrain_ret"
#else
-#define CALL_ZEN_UNTRAIN_RET ""
+#define CALL_UNTRAIN_RET ""
#endif
/*
@@ -293,15 +293,10 @@
defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
VALIDATE_UNRET_END
ALTERNATIVE_3 "", \
- CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \
+ CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
__stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
-
-#ifdef CONFIG_CPU_SRSO
- ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
- "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
-#endif
.endm
.macro UNTRAIN_RET_FROM_CALL
@@ -309,15 +304,10 @@
defined(CONFIG_CALL_DEPTH_TRACKING)
VALIDATE_UNRET_END
ALTERNATIVE_3 "", \
- CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \
+ CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
__stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
#endif
-
-#ifdef CONFIG_CPU_SRSO
- ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
- "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
-#endif
.endm
@@ -355,6 +345,7 @@ extern void retbleed_untrain_ret(void);
extern void srso_untrain_ret(void);
extern void srso_alias_untrain_ret(void);
+extern void entry_untrain_ret(void);
extern void entry_ibpb(void);
extern void (*x86_return_thunk)(void);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index bbbbda9..6f3e195 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -2460,6 +2460,7 @@ static void __init srso_select_mitigation(void)
* like ftrace, static_call, etc.
*/
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ setup_force_cpu_cap(X86_FEATURE_UNRET);
if (boot_cpu_data.x86 == 0x19) {
setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index d37e5ab..5e85da1 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -289,6 +289,13 @@ SYM_CODE_START(srso_return_thunk)
ud2
SYM_CODE_END(srso_return_thunk)
+SYM_FUNC_START(entry_untrain_ret)
+ ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
+ "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
+ "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+SYM_FUNC_END(entry_untrain_ret)
+__EXPORT_THUNK(entry_untrain_ret)
+
SYM_CODE_START(__x86_return_thunk)
UNWIND_HINT_FUNC
ANNOTATE_NOENDBR