Message-ID: <20230821112723.3995187-2-andrew.cooper3@citrix.com>
Date: Mon, 21 Aug 2023 12:27:20 +0100
From: Andrew Cooper <andrew.cooper3@...rix.com>
To: LKML <linux-kernel@...r.kernel.org>
CC: Andrew Cooper <andrew.cooper3@...rix.com>, <x86@...nel.org>,
"Borislav Petkov" <bp@...en8.de>,
Peter Zijlstra <peterz@...radead.org>,
Josh Poimboeuf <jpoimboe@...nel.org>,
Babu Moger <babu.moger@....com>, <David.Kaplan@....com>,
Nikolay Borisov <nik.borisov@...e.com>,
<gregkh@...uxfoundation.org>, Thomas Gleixner <tglx@...utronix.de>
Subject: [PATCH 1/4] x86/srso: Rename srso_alias_*() to srso_fam19_*()

The 'alias' name is an internal detail of how the logic works. Rename it
to state which microarchitecture it is applicable to.

Signed-off-by: Andrew Cooper <andrew.cooper3@...rix.com>
---
CC: x86@...nel.org
CC: linux-kernel@...r.kernel.org
CC: Borislav Petkov <bp@...en8.de>
CC: Peter Zijlstra <peterz@...radead.org>
CC: Josh Poimboeuf <jpoimboe@...nel.org>
CC: Babu Moger <babu.moger@....com>
CC: David.Kaplan@....com
CC: Nikolay Borisov <nik.borisov@...e.com>
CC: gregkh@...uxfoundation.org
CC: Thomas Gleixner <tglx@...utronix.de>
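
A note for reviewers, not part of the patch: the renamed fam19 pair
depends on a BTB-aliasing invariant which vmlinux.lds.S asserts at link
time. Below is a minimal userspace sketch of that invariant in C; the
addresses are hypothetical, chosen purely for illustration:

  #include <assert.h>
  #include <stdint.h>

  /* Bits 2, 8, 14 and 20 must differ between the two entry points. */
  #define SRSO_ALIAS_MASK ((1u << 2) | (1u << 8) | (1u << 14) | (1u << 20))

  int main(void)
  {
      /* Hypothetical addresses: untrain_ret is 2M aligned, so bits
       * 0-20 are clear; safe_ret sits in the same 2M page with the
       * mask bits set. */
      uint64_t untrain_ret = 0xffffffff81a00000ull;
      uint64_t safe_ret    = untrain_ret | SRSO_ALIAS_MASK;

      /* Pre-2.41 GNU ld cannot XOR symbols, so the linker script
       * uses the identity (A | B) - (A & B) == A ^ B instead. */
      assert(((untrain_ret | safe_ret) - (untrain_ret & safe_ret)) ==
             SRSO_ALIAS_MASK);

      /* Same 2M page: everything above bit 20 is identical, which is
       * what makes the two addresses collide in the Zen3/4 BTB. */
      assert((untrain_ret >> 21) == (safe_ret >> 21));
      return 0;
  }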
---
 arch/x86/include/asm/nospec-branch.h |  4 ++--
 arch/x86/kernel/cpu/bugs.c           |  2 +-
 arch/x86/kernel/vmlinux.lds.S        |  8 +++----
 arch/x86/lib/retpoline.S             | 34 ++++++++++++++--------------
 4 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index c55cc243592e..93e8de0bf94e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -350,11 +350,11 @@ static inline void __x86_return_thunk(void) {}

 extern void retbleed_return_thunk(void);
 extern void srso_return_thunk(void);
-extern void srso_alias_return_thunk(void);
+extern void srso_fam19_return_thunk(void);

 extern void retbleed_untrain_ret(void);
 extern void srso_untrain_ret(void);
-extern void srso_alias_untrain_ret(void);
+extern void srso_fam19_untrain_ret(void);

 extern void entry_untrain_ret(void);
 extern void entry_ibpb(void);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index f081d26616ac..92bec0d719ce 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -2464,7 +2464,7 @@ static void __init srso_select_mitigation(void)

 			if (boot_cpu_data.x86 == 0x19) {
 				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
-				x86_return_thunk = srso_alias_return_thunk;
+				x86_return_thunk = srso_fam19_return_thunk;
 			} else {
 				setup_force_cpu_cap(X86_FEATURE_SRSO);
 				x86_return_thunk = srso_return_thunk;
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 83d41c2601d7..c9b6f8b83187 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -147,10 +147,10 @@ SECTIONS

 #ifdef CONFIG_CPU_SRSO
 		/*
-		 * See the comment above srso_alias_untrain_ret()'s
+		 * See the comment above srso_fam19_untrain_ret()'s
 		 * definition.
 		 */
-		. = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+		. = srso_fam19_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
 		*(.text..__x86.rethunk_safe)
 #endif
 		ALIGN_ENTRY_TEXT_END
@@ -536,8 +536,8 @@ INIT_PER_CPU(irq_stack_backing_store);
  * Instead do: (A | B) - (A & B) in order to compute the XOR
  * of the two function addresses:
  */
-. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
-	    (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+. = ASSERT(((ABSOLUTE(srso_fam19_untrain_ret) | srso_fam19_safe_ret) -
+	    (ABSOLUTE(srso_fam19_untrain_ret) & srso_fam19_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
 	    "SRSO function pair won't alias");
 #endif

diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index cd86aeb5fdd3..772757ea26a7 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -133,58 +133,58 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
 #ifdef CONFIG_RETHUNK

 /*
- * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
+ * srso_fam19_untrain_ret() and srso_fam19_safe_ret() are placed at
  * special addresses:
  *
- * - srso_alias_untrain_ret() is 2M aligned
- * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
+ * - srso_fam19_untrain_ret() is 2M aligned
+ * - srso_fam19_safe_ret() is also in the same 2M page but bits 2, 8, 14
  * and 20 in its virtual address are set (while those bits in the
- * srso_alias_untrain_ret() function are cleared).
+ * srso_fam19_untrain_ret() function are cleared).
  *
  * This guarantees that those two addresses will alias in the branch
  * target buffer of Zen3/4 generations, leading to any potential
  * poisoned entries at that BTB slot to get evicted.
  *
- * As a result, srso_alias_safe_ret() becomes a safe return.
+ * As a result, srso_fam19_safe_ret() becomes a safe return.
  */
 #ifdef CONFIG_CPU_SRSO
 	.section .text..__x86.rethunk_untrain

-SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_fam19_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
 	ASM_NOP2
 	lfence
-	jmp srso_alias_return_thunk
-SYM_FUNC_END(srso_alias_untrain_ret)
-__EXPORT_THUNK(srso_alias_untrain_ret)
+	jmp srso_fam19_return_thunk
+SYM_FUNC_END(srso_fam19_untrain_ret)
+__EXPORT_THUNK(srso_fam19_untrain_ret)

 	.section .text..__x86.rethunk_safe
 #else
 /* dummy definition for alternatives */
-SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_fam19_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_UNRET_SAFE
 	ret
 	int3
-SYM_FUNC_END(srso_alias_untrain_ret)
+SYM_FUNC_END(srso_fam19_untrain_ret)
 #endif

-SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_fam19_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	lea 8(%_ASM_SP), %_ASM_SP
 	UNWIND_HINT_FUNC
 	ANNOTATE_UNRET_SAFE
 	ret
 	int3
-SYM_FUNC_END(srso_alias_safe_ret)
+SYM_FUNC_END(srso_fam19_safe_ret)

 	.section .text..__x86.return_thunk

-SYM_CODE_START(srso_alias_return_thunk)
+SYM_CODE_START(srso_fam19_return_thunk)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
-	call srso_alias_safe_ret
+	call srso_fam19_safe_ret
 	ud2
-SYM_CODE_END(srso_alias_return_thunk)
+SYM_CODE_END(srso_fam19_return_thunk)

 /*
  * Some generic notes on the untraining sequences:
@@ -311,7 +311,7 @@ SYM_CODE_END(srso_return_thunk)
 SYM_FUNC_START(entry_untrain_ret)
 	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
 		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
-		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+		      "jmp srso_fam19_untrain_ret", X86_FEATURE_SRSO_ALIAS
 SYM_FUNC_END(entry_untrain_ret)
 __EXPORT_THUNK(entry_untrain_ret)

-- 
2.30.2