Date:	Sun, 21 Jun 2015 00:47:15 +0300
From:	Alexey Dobriyan <adobriyan@...il.com>
To:	hpa@...or.com, mingo@...nel.org
Cc:	x86@...nel.org, linux-kernel@...r.kernel.org
Subject: [PATCH 2/2] x86: fix incomplete clear by clear_user()

clear_user() uses MOVQ+MOVB: if a MOVQ store faults, the code simply exits
and honestly returns the remaining length. For an unaligned area, the
unaligned remainder is counted towards the return value (correctly) but is
not cleared (lazy code, at least):

	clear_user(p + 4096 - 4, 8) = 8

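Here p is assumed to be the start of a writable page and p + 4096 the first
byte of an inaccessible one. A userspace sketch of that layout, for
illustration only (clear_user() itself runs in kernel context):

#include <sys/mman.h>

int main(void)
{
	/* One writable page followed by an inaccessible one:
	 * p[4095] can be written, p[4096] faults on write. */
	char *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED || mprotect(p + 4096, 4096, PROT_NONE))
		return 1;

	/* clear_user(p + 4096 - 4, 8) can clear at most 4 bytes
	 * before hitting the fault. */
	return 0;
}
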
No one would have noticed, but the addition of REP STOSB to the clear_user()
repertoire creates a problem: REP STOSB does everything correctly, clearing
and counting down to the last possible byte, but the REP STOSQ and MOVQ
variants DO NOT:

	MOVQ		clear_user(p + 4096 - 4, 8) = 8
	REP STOSQ	clear_user(p + 4096 - 4, 8) = 8
	REP STOSB	clear_user(p + 4096 - 4, 8) = 4

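A minimal userspace model of the semantics all three variants should agree
on: clear everything up to the faulting byte and return only the uncleared
tail. The 'ok' argument is a stand-in for what the exception fixup works out
at run time; this is a sketch, not the kernel implementation:

#include <stddef.h>
#include <string.h>

/* ok = number of bytes that can be stored before the first fault. */
size_t model_clear_user(void *to, size_t n, size_t ok)
{
	if (ok > n)
		ok = n;
	memset(to, 0, ok);	/* clear as much as possible */
	return n - ok;		/* report only the uncleared tail */
}

For the example above ok = 4, so model_clear_user(p + 4096 - 4, 8, 4) clears
4 bytes and returns 4, matching the REP STOSB row. The fixups below fall
back to the byte-store loop so the MOVQ and REP STOSQ paths end up with the
same result.
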
This patch fixes the incomplete clear in the 32-bit code and in the 64-bit
REP STOSQ and MOVQ variants.

Signed-off-by: Alexey Dobriyan <adobriyan@...il.com>
---

 arch/x86/lib/clear_user_64.S |    9 ++++++---
 arch/x86/lib/usercopy_32.c   |    3 ++-
 2 files changed, 8 insertions(+), 4 deletions(-)

--- a/arch/x86/lib/clear_user_64.S
+++ b/arch/x86/lib/clear_user_64.S
@@ -26,7 +26,8 @@ ENTRY(__clear_user)
 
 	.section .fixup,"ax"
 4:	lea	(%rsi,%rcx,8),%rcx
-	jmp	3b
+	# Fill as much as possible with byte stores.
+	jmp	2b
 	.previous
 
 	_ASM_EXTABLE(1b,4b)
@@ -57,7 +58,8 @@ ENTRY(__clear_user_movq)
 3:
 	movb	$0, (%rdi)
 	add	$1, %rdi
-	sub	$1, %ecx
+	# Unaligned area and 4GB+ tail after recovery require RCX here.
+	sub	$1, %rcx
 	jnz	3b
 4:
 	mov	%rcx, %rax
@@ -66,7 +68,8 @@ ENTRY(__clear_user_movq)
 
 	.section .fixup,"ax"
 5:	lea	(%rsi,%rcx,8),%rcx
-	jmp	4b
+	# Fill as much as possible with byte stores.
+	jmp	3b
 	.previous
 
 	_ASM_EXTABLE(1b,5b)
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -49,7 +49,8 @@ do {									\
 		"2: " ASM_CLAC "\n"					\
 		".section .fixup,\"ax\"\n"				\
 		"3:	lea 0(%2,%0,4),%0\n"				\
-		"	jmp 2b\n"					\
+		/* Fill as much as possible with byte stores. */	\
+		"	jmp 1b\n"					\
 		".previous\n"						\
 		_ASM_EXTABLE(0b,3b)					\
 		_ASM_EXTABLE(1b,2b)					\