Message-ID: <20211105171820.568721583@infradead.org>
Date:   Fri, 05 Nov 2021 18:10:25 +0100
From:   Peter Zijlstra <peterz@...radead.org>
To:     x86@...nel.org
Cc:     linux-kernel@...r.kernel.org, peterz@...radead.org,
        jpoimboe@...hat.com, mark.rutland@....com, dvyukov@...gle.com,
        seanjc@...gle.com, pbonzini@...hat.com, mbenes@...e.cz
Subject: [PATCH 02/22] x86,mmx_32: Remove .fixup usage

This code puts an exception table entry on the PREFETCH instruction to
overwrite it with a jmp.d8 when it triggers an exception (which it does
on CPUs that lack the 3DNow! PREFETCH instruction). Except of course,
kernel text is no longer writable at runtime, and patching it from the
fault handler was never SMP-safe either.
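
For the record, the magic constants decode as follows: x86 is
little-endian, so each 16-bit store lands as "EB xx", a two-byte jmp.d8:

	movw $0x1AEB, 1b	/* bytes EB 1A: jmp .+26, skipping the
				   remaining 26 of the 28 prefetch bytes
				   and landing on the 2: label */
	movw $0x05EB, 1b	/* bytes EB 05: jmp .+5, skipping the
				   rest of the 7-byte "prefetch 320(%0)" */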

Replace it with ALTERNATIVE, so the prefetch instructions are only
patched in on CPUs that advertise X86_FEATURE_3DNOWPREFETCH.
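
A minimal sketch of the resulting pattern (illustrative only, not part
of the diff below), using ALTERNATIVE() from <asm/alternative.h>:

	/*
	 * Nothing is emitted by default; apply_alternatives() patches
	 * the prefetch in at boot on CPUs that advertise
	 * X86_FEATURE_3DNOWPREFETCH.
	 */
	asm volatile(ALTERNATIVE("",			/* oldinstr */
				 "prefetch (%0)",	/* newinstr */
				 X86_FEATURE_3DNOWPREFETCH)
		     : : "r" (from));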

XXX: arguably we should just delete this code

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 arch/x86/lib/mmx_32.c |   86 +++++++++++++++++---------------------------------
 1 file changed, 30 insertions(+), 56 deletions(-)

--- a/arch/x86/lib/mmx_32.c
+++ b/arch/x86/lib/mmx_32.c
@@ -50,23 +50,18 @@ void *_mmx_memcpy(void *to, const void *
 	kernel_fpu_begin_mask(KFPU_387);
 
 	__asm__ __volatile__ (
-		"1: prefetch (%0)\n"		/* This set is 28 bytes */
-		"   prefetch 64(%0)\n"
-		"   prefetch 128(%0)\n"
-		"   prefetch 192(%0)\n"
-		"   prefetch 256(%0)\n"
-		"2:  \n"
-		".section .fixup, \"ax\"\n"
-		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
-		"   jmp 2b\n"
-		".previous\n"
-			_ASM_EXTABLE(1b, 3b)
-			: : "r" (from));
+		ALTERNATIVE("",
+			    "prefetch (%0)\n"
+			    "prefetch 64(%0)\n"
+			    "prefetch 128(%0)\n"
+			    "prefetch 192(%0)\n"
+			    "prefetch 256(%0)\n", X86_FEATURE_3DNOWPREFETCH)
+		: : "r" (from));
 
 	for ( ; i > 5; i--) {
 		__asm__ __volatile__ (
-		"1:  prefetch 320(%0)\n"
-		"2:  movq (%0), %%mm0\n"
+		ALTERNATIVE("", "prefetch 320(%0)\n", X86_FEATURE_3DNOWPREFETCH)
+		"  movq (%0), %%mm0\n"
 		"  movq 8(%0), %%mm1\n"
 		"  movq 16(%0), %%mm2\n"
 		"  movq 24(%0), %%mm3\n"
@@ -82,11 +77,6 @@ void *_mmx_memcpy(void *to, const void *
 		"  movq %%mm1, 40(%1)\n"
 		"  movq %%mm2, 48(%1)\n"
 		"  movq %%mm3, 56(%1)\n"
-		".section .fixup, \"ax\"\n"
-		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
-		"   jmp 2b\n"
-		".previous\n"
-			_ASM_EXTABLE(1b, 3b)
 			: : "r" (from), "r" (to) : "memory");
 
 		from += 64;
@@ -177,22 +167,18 @@ static void fast_copy_page(void *to, voi
 	 * but that is for later. -AV
 	 */
 	__asm__ __volatile__(
-		"1: prefetch (%0)\n"
-		"   prefetch 64(%0)\n"
-		"   prefetch 128(%0)\n"
-		"   prefetch 192(%0)\n"
-		"   prefetch 256(%0)\n"
-		"2:  \n"
-		".section .fixup, \"ax\"\n"
-		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
-		"   jmp 2b\n"
-		".previous\n"
-			_ASM_EXTABLE(1b, 3b) : : "r" (from));
+		ALTERNATIVE("",
+			    "prefetch (%0)\n"
+			    "prefetch 64(%0)\n"
+			    "prefetch 128(%0)\n"
+			    "prefetch 192(%0)\n"
+			    "prefetch 256(%0)\n", X86_FEATURE_3DNOWPREFETCH)
+		: : "r" (from));
 
 	for (i = 0; i < (4096-320)/64; i++) {
 		__asm__ __volatile__ (
-		"1: prefetch 320(%0)\n"
-		"2: movq (%0), %%mm0\n"
+		ALTERNATIVE("", "prefetch 320(%0)\n", X86_FEATURE_3DNOWPREFETCH)
+		"   movq (%0), %%mm0\n"
 		"   movntq %%mm0, (%1)\n"
 		"   movq 8(%0), %%mm1\n"
 		"   movntq %%mm1, 8(%1)\n"
@@ -208,11 +194,7 @@ static void fast_copy_page(void *to, voi
 		"   movntq %%mm6, 48(%1)\n"
 		"   movq 56(%0), %%mm7\n"
 		"   movntq %%mm7, 56(%1)\n"
-		".section .fixup, \"ax\"\n"
-		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
-		"   jmp 2b\n"
-		".previous\n"
-		_ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
+			: : "r" (from), "r" (to) : "memory");
 
 		from += 64;
 		to += 64;
@@ -220,7 +202,7 @@ static void fast_copy_page(void *to, voi
 
 	for (i = (4096-320)/64; i < 4096/64; i++) {
 		__asm__ __volatile__ (
-		"2: movq (%0), %%mm0\n"
+		"   movq (%0), %%mm0\n"
 		"   movntq %%mm0, (%1)\n"
 		"   movq 8(%0), %%mm1\n"
 		"   movntq %%mm1, 8(%1)\n"
@@ -237,6 +219,7 @@ static void fast_copy_page(void *to, voi
 		"   movq 56(%0), %%mm7\n"
 		"   movntq %%mm7, 56(%1)\n"
 			: : "r" (from), "r" (to) : "memory");
+
 		from += 64;
 		to += 64;
 	}
@@ -295,22 +278,18 @@ static void fast_copy_page(void *to, voi
 	kernel_fpu_begin_mask(KFPU_387);
 
 	__asm__ __volatile__ (
-		"1: prefetch (%0)\n"
-		"   prefetch 64(%0)\n"
-		"   prefetch 128(%0)\n"
-		"   prefetch 192(%0)\n"
-		"   prefetch 256(%0)\n"
-		"2:  \n"
-		".section .fixup, \"ax\"\n"
-		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
-		"   jmp 2b\n"
-		".previous\n"
-			_ASM_EXTABLE(1b, 3b) : : "r" (from));
+		ALTERNATIVE("",
+			    "prefetch (%0)\n"
+			    "prefetch 64(%0)\n"
+			    "prefetch 128(%0)\n"
+			    "prefetch 192(%0)\n"
+			    "prefetch 256(%0)\n", X86_FEATURE_3DNOWPREFETCH)
+		: : "r" (from));
 
 	for (i = 0; i < 4096/64; i++) {
 		__asm__ __volatile__ (
-		"1: prefetch 320(%0)\n"
-		"2: movq (%0), %%mm0\n"
+		ALTERNATIVE("", "prefetch 320(%0)\n", X86_FEATURE_3DNOWPREFETCH)
+		"   movq (%0), %%mm0\n"
 		"   movq 8(%0), %%mm1\n"
 		"   movq 16(%0), %%mm2\n"
 		"   movq 24(%0), %%mm3\n"
@@ -326,11 +305,6 @@ static void fast_copy_page(void *to, voi
 		"   movq %%mm1, 40(%1)\n"
 		"   movq %%mm2, 48(%1)\n"
 		"   movq %%mm3, 56(%1)\n"
-		".section .fixup, \"ax\"\n"
-		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
-		"   jmp 2b\n"
-		".previous\n"
-			_ASM_EXTABLE(1b, 3b)
 			: : "r" (from), "r" (to) : "memory");
 
 		from += 64;

