Message-ID: <20230821112723.3995187-3-andrew.cooper3@citrix.com>
Date:   Mon, 21 Aug 2023 12:27:21 +0100
From:   Andrew Cooper <andrew.cooper3@...rix.com>
To:     LKML <linux-kernel@...r.kernel.org>
CC:     Andrew Cooper <andrew.cooper3@...rix.com>, <x86@...nel.org>,
        "Borislav Petkov" <bp@...en8.de>,
        Peter Zijlstra <peterz@...radead.org>,
        Josh Poimboeuf <jpoimboe@...nel.org>,
        Babu Moger <babu.moger@....com>, <David.Kaplan@....com>,
        Nikolay Borisov <nik.borisov@...e.com>,
        <gregkh@...uxfoundation.org>, Thomas Gleixner <tglx@...utronix.de>
Subject: [PATCH 2/4] x86/srso: Rename fam17 SRSO infrastructure to srso_fam17_*()

The naming is inconsistent.  Rename the plain srso_*() infrastructure to
srso_fam17_*() to state the microarchitecture (Zen1/2, i.e. Fam17h) it
applies to, and to mirror the srso_fam19_*() change.

Signed-off-by: Andrew Cooper <andrew.cooper3@...rix.com>
---
CC: x86@...nel.org
CC: linux-kernel@...r.kernel.org
CC: Borislav Petkov <bp@...en8.de>
CC: Peter Zijlstra <peterz@...radead.org>
CC: Josh Poimboeuf <jpoimboe@...nel.org>
CC: Babu Moger <babu.moger@....com>
CC: David.Kaplan@....com
CC: Nikolay Borisov <nik.borisov@...e.com>
CC: gregkh@...uxfoundation.org
CC: Thomas Gleixner <tglx@...utronix.de>
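
For reference, a sketch of the byte overlap that makes the fam17 sequence
work (bytes reconstructed from the 0xccccc30824648d48 immediate in the
patch below, decoded per the AMD64 opcode map):

  48 b8 48 8d 64 24 08 c3 cc cc
  <----- one instruction ----->     untrain path, entered at
  movabs $0xccccc30824648d48,%rax   srso_fam17_untrain_ret

        48 8d 64 24 08              lea    8(%rsp),%rsp
                       c3           ret
                          cc cc     int3; int3

  return path, entered two bytes in at srso_fam17_safe_ret: the same
  bytes decode as four instructions.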
---
 arch/x86/include/asm/nospec-branch.h |  4 ++--
 arch/x86/kernel/cpu/bugs.c           |  2 +-
 arch/x86/kernel/vmlinux.lds.S        |  2 +-
 arch/x86/lib/retpoline.S             | 32 ++++++++++++++--------------
 4 files changed, 20 insertions(+), 20 deletions(-)
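
Note: the vmlinux.lds.S assertion below checks the property which the
.align/.skip pair in retpoline.S is constructed to provide: it is
srso_fam17_safe_ret, not the untrain entry point, which must start a
cacheline.  A sketch of the arithmetic, with B the boundary picked by
.align 64:

	.align 64
	.skip 64 - (srso_fam17_safe_ret - srso_fam17_untrain_ret), 0xcc
	/* => srso_fam17_untrain_ret = B + 64 - (safe_ret - untrain_ret) */
	/* => srso_fam17_safe_ret    = B + 64, i.e. cacheline aligned    */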

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 93e8de0bf94e..a4c686bc4b1f 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -349,11 +349,11 @@ static inline void __x86_return_thunk(void) {}
 #endif
 
 extern void retbleed_return_thunk(void);
-extern void srso_return_thunk(void);
+extern void srso_fam17_return_thunk(void);
 extern void srso_fam19_return_thunk(void);
 
 extern void retbleed_untrain_ret(void);
-extern void srso_untrain_ret(void);
+extern void srso_fam17_untrain_ret(void);
 extern void srso_fam19_untrain_ret(void);
 
 extern void entry_untrain_ret(void);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 92bec0d719ce..893d14a9f282 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -2467,7 +2467,7 @@ static void __init srso_select_mitigation(void)
 				x86_return_thunk = srso_fam19_return_thunk;
 			} else {
 				setup_force_cpu_cap(X86_FEATURE_SRSO);
-				x86_return_thunk = srso_return_thunk;
+				x86_return_thunk = srso_fam17_return_thunk;
 			}
 			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
 		} else {
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index c9b6f8b83187..127ccdbf6d95 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -522,7 +522,7 @@ INIT_PER_CPU(irq_stack_backing_store);
 
 #ifdef CONFIG_RETHUNK
 . = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
-. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
+. = ASSERT((srso_fam17_safe_ret & 0x3f) == 0, "srso_fam17_safe_ret not cacheline-aligned");
 #endif
 
 #ifdef CONFIG_CPU_SRSO
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 772757ea26a7..d8732ae21122 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -194,13 +194,13 @@ SYM_CODE_END(srso_fam19_return_thunk)
  *
  * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
  * Retbleed sequence because the return sequence done there
- * (srso_safe_ret()) is longer and the return sequence must fully nest
+ * (srso_fam17_safe_ret()) is longer and the return sequence must fully nest
  * (end before) the untraining sequence. Therefore, the untraining
  * sequence must fully overlap the return sequence.
  *
  * Regarding alignment - the instructions which need to be untrained,
  * must all start at a cacheline boundary for Zen1/2 generations. That
- * is, instruction sequences starting at srso_safe_ret() and
+ * is, instruction sequences starting at srso_fam17_safe_ret() and
  * the respective instruction sequences at retbleed_return_thunk()
  * must start at a cacheline boundary.
  */
@@ -268,49 +268,49 @@ __EXPORT_THUNK(retbleed_untrain_ret)
 
 /*
  * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
- * above. On kernel entry, srso_untrain_ret() is executed which is a
+ * above. On kernel entry, srso_fam17_untrain_ret() is executed which is a
  *
  * movabs $0xccccc30824648d48,%rax
  *
- * and when the return thunk executes the inner label srso_safe_ret()
+ * and when the return thunk executes the inner label srso_fam17_safe_ret()
  * later, it is a stack manipulation and a RET which is mispredicted and
  * thus a "safe" one to use.
  */
 	.align 64
-	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
-SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+	.skip 64 - (srso_fam17_safe_ret - srso_fam17_untrain_ret), 0xcc
+SYM_START(srso_fam17_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_NOENDBR
 	.byte 0x48, 0xb8
 
 /*
  * This forces the function return instruction to speculate into a trap
- * (UD2 in srso_return_thunk() below).  This RET will then mispredict
+ * (UD2 in srso_fam17_return_thunk() below).  This RET will then mispredict
  * and execution will continue at the return site read from the top of
  * the stack.
  */
-SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+SYM_INNER_LABEL(srso_fam17_safe_ret, SYM_L_GLOBAL)
 	lea 8(%_ASM_SP), %_ASM_SP
 	ret
 	int3
 	int3
 	/* end of movabs */
 	lfence
-	call srso_safe_ret
+	call srso_fam17_safe_ret
 	ud2
-SYM_CODE_END(srso_safe_ret)
-SYM_FUNC_END(srso_untrain_ret)
-__EXPORT_THUNK(srso_untrain_ret)
+SYM_CODE_END(srso_fam17_safe_ret)
+SYM_FUNC_END(srso_fam17_untrain_ret)
+__EXPORT_THUNK(srso_fam17_untrain_ret)
 
-SYM_CODE_START(srso_return_thunk)
+SYM_CODE_START(srso_fam17_return_thunk)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
-	call srso_safe_ret
+	call srso_fam17_safe_ret
 	ud2
-SYM_CODE_END(srso_return_thunk)
+SYM_CODE_END(srso_fam17_return_thunk)
 
 SYM_FUNC_START(entry_untrain_ret)
 	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
-		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
+		      "jmp srso_fam17_untrain_ret", X86_FEATURE_SRSO, \
 		      "jmp srso_fam19_untrain_ret", X86_FEATURE_SRSO_ALIAS
 SYM_FUNC_END(entry_untrain_ret)
 __EXPORT_THUNK(entry_untrain_ret)
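
For completeness, the ALTERNATIVE_2 above is resolved at boot; a rough
sketch of the post-patch result, which is exactly one of:

	/* entry_untrain_ret after boot-time alternatives patching: */
	jmp retbleed_untrain_ret	/* default			*/
	jmp srso_fam17_untrain_ret	/* X86_FEATURE_SRSO set		*/
	jmp srso_fam19_untrain_ret	/* X86_FEATURE_SRSO_ALIAS set	*/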
-- 
2.30.2
