Message-Id: <20220125091453.1475246-4-ardb@kernel.org>
Date:   Tue, 25 Jan 2022 10:14:48 +0100
From:   Ard Biesheuvel <ardb@...nel.org>
To:     linux@...linux.org.uk, linux-arm-kernel@...ts.infradead.org
Cc:     linux-hardening@...r.kernel.org, Ard Biesheuvel <ardb@...nel.org>,
        Arnd Bergmann <arnd@...db.de>,
        Kees Cook <keescook@...omium.org>,
        Keith Packard <keithpac@...zon.com>,
        Linus Walleij <linus.walleij@...aro.org>,
        Nick Desaulniers <ndesaulniers@...gle.com>,
        Marc Zyngier <maz@...nel.org>
Subject: [PATCH v6 3/8] ARM: smp: elide HWCAP_TLS checks or __entry_task updates on SMP+v6

Use the SMP_ON_UP patching framework to elide HWCAP_TLS tests from the
context switch and return-to-userspace code paths, as SMP systems are
guaranteed to have this hardware capability.
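
For reference, a minimal sketch of the SMP_ON_UP pattern this relies on
(illustrative only: the .Lup/.Ldone labels are placeholders, and inside
the real macros the \@-suffixed local labels are used instead; ALT_SMP,
ALT_UP_B and .subsection/.previous are the ones provided by
arch/arm/include/asm/assembler.h):

  	@ In an SMP kernel image the nop is what runs, so the UP-only
  	@ code kept in subsection 1 is never reached.  When the same
  	@ image boots on a uniprocessor, the SMP_ON_UP fixup code
  	@ patches the nop into a branch to .Lup, and the out-of-line
  	@ path runs instead.
  ALT_SMP(nop)
  ALT_UP_B(.Lup)
  	.subsection 1		@ keep the cold UP-only path out of line
  .Lup:
  	@ ... UP-only work, e.g. the HWCAP_TLS test ...
  	b	.Ldone
  	.previous
  .Ldone: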

At the same time, omit the update of __entry_task if the system is
detected to be UP at runtime, as the value is never used in that case.
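
A minimal C sketch of that guard (the update_entry_task() helper is
hypothetical and only illustrates the shape of the switch_to change;
is_smp() comes from <asm/smp_plat.h> and is resolved at boot on an
SMP_ON_UP kernel):

  /* Hypothetical helper, not part of this patch: the runtime guard only. */
  static inline void update_entry_task(struct task_struct *next)
  {
  	/*
  	 * Kernels with CONFIG_CURRENT_POINTER_IN_TPIDRURO always need the
  	 * per-CPU value; otherwise, a system that turns out to be UP at
  	 * runtime never consumes __entry_task, so the write is skipped.
  	 */
  	if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || is_smp())
  		__this_cpu_write(__entry_task, next);
  }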

Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
---
 arch/arm/include/asm/switch_to.h |  4 ++--
 arch/arm/include/asm/tls.h       | 22 ++++++++++++++------
 arch/arm/kernel/entry-header.S   | 17 +++++++--------
 3 files changed, 25 insertions(+), 18 deletions(-)

diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index a482c99934ff..f67ae946a3c6 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -3,6 +3,7 @@
 #define __ASM_ARM_SWITCH_TO_H
 
 #include <linux/thread_info.h>
+#include <asm/smp_plat.h>
 
 /*
  * For v7 SMP cores running a preemptible kernel we may be pre-empted
@@ -40,8 +41,7 @@ static inline void set_ti_cpu(struct task_struct *p)
 do {									\
 	__complete_pending_tlbi();					\
 	set_ti_cpu(next);						\
-	if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO) ||		\
-	    IS_ENABLED(CONFIG_SMP))					\
+	if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || is_smp())	\
 		__this_cpu_write(__entry_task, next);			\
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
 } while (0)
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index d712c170c095..3dcd0f71a0da 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -18,22 +18,32 @@
 	.endm
 
 	.macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
+#ifdef CONFIG_SMP
+ALT_SMP(nop)
+ALT_UP_B(.L0_\@)
+	.subsection 1
+#endif
+.L0_\@:
 	ldr_va	\tmp1, elf_hwcap
 	mov	\tmp2, #0xffff0fff
 	tst	\tmp1, #HWCAP_TLS		@ hardware TLS available?
 	streq	\tp, [\tmp2, #-15]		@ set TLS value at 0xffff0ff0
-	mrcne	p15, 0, \tmp2, c13, c0, 2	@ get the user r/w register
-#ifndef CONFIG_SMP
-	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
+	beq	.L2_\@
+	mcr	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
+#ifdef CONFIG_SMP
+	b	.L1_\@
+	.previous
 #endif
-	mcrne	p15, 0, \tpuser, c13, c0, 2	@ set user r/w register
-	strne	\tmp2, [\base, #TI_TP_VALUE + 4] @ save it
+.L1_\@: switch_tls_v6k \base, \tp, \tpuser, \tmp1, \tmp2
+.L2_\@:
 	.endm
 
 	.macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
 	mov	\tmp1, #0xffff0fff
 	str	\tp, [\tmp1, #-15]		@ set TLS value at 0xffff0ff0
 	.endm
+#else
+#include <asm/smp_plat.h>
 #endif
 
 #ifdef CONFIG_TLS_REG_EMUL
@@ -44,7 +54,7 @@
 #elif defined(CONFIG_CPU_V6)
 #define tls_emu		0
 #define has_tls_reg		(elf_hwcap & HWCAP_TLS)
-#define defer_tls_reg_update	IS_ENABLED(CONFIG_SMP)
+#define defer_tls_reg_update	is_smp()
 #define switch_tls	switch_tls_v6
 #elif defined(CONFIG_CPU_32v6K)
 #define tls_emu		0
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index cb82ff5adec1..9a1dc142f782 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -292,21 +292,18 @@
 
 
 	.macro	restore_user_regs, fast = 0, offset = 0
-#if defined(CONFIG_CPU_32v6K) || defined(CONFIG_SMP)
-#if defined(CONFIG_CPU_V6) && defined(CONFIG_SMP)
-ALT_SMP(b	.L1_\@	)
-ALT_UP( nop		)
-	ldr_va	r1, elf_hwcap
-	tst	r1, #HWCAP_TLS			@ hardware TLS available?
-	beq	.L2_\@
-.L1_\@:
+#if defined(CONFIG_CPU_32v6K) && \
+    (!defined(CONFIG_CPU_V6) || defined(CONFIG_SMP))
+#ifdef CONFIG_CPU_V6
+ALT_SMP(nop)
+ALT_UP_B(.L1_\@)
 #endif
 	@ The TLS register update is deferred until return to user space so we
 	@ can use it for other things while running in the kernel
-	get_thread_info r1
+	mrc	p15, 0, r1, c13, c0, 3		@ get current_thread_info pointer
 	ldr	r1, [r1, #TI_TP_VALUE]
 	mcr	p15, 0, r1, c13, c0, 3		@ set TLS register
-.L2_\@:
+.L1_\@:
 #endif
 
 	uaccess_enable r1, isb=0
-- 
2.30.2
