Message-ID: <tip-46eabca284f9f27fe94698a068a4a21ba93b4975@git.kernel.org>
Date:   Thu, 19 Jul 2018 16:20:27 -0700
From:   tip-bot for Joerg Roedel <tipbot@...or.com>
To:     linux-tip-commits@...r.kernel.org
Cc:     gregkh@...uxfoundation.org, hpa@...or.com, dave.hansen@...el.com,
        eduval@...zon.com, aarcange@...hat.com, dhgutteridge@...patico.ca,
        brgerst@...il.com, jgross@...e.com, mingo@...nel.org,
        peterz@...radead.org, David.Laight@...lab.com, luto@...nel.org,
        will.deacon@....com, linux-kernel@...r.kernel.org, jroedel@...e.de,
        torvalds@...ux-foundation.org, dvlasenk@...hat.com,
        tglx@...utronix.de, jkosina@...e.cz, bp@...en8.de,
        jpoimboe@...hat.com, llong@...hat.com, boris.ostrovsky@...cle.com,
        pavel@....cz
Subject: [tip:x86/pti] x86/entry/32: Put ESPFIX code into a macro

Commit-ID:  46eabca284f9f27fe94698a068a4a21ba93b4975
Gitweb:     https://git.kernel.org/tip/46eabca284f9f27fe94698a068a4a21ba93b4975
Author:     Joerg Roedel <jroedel@...e.de>
AuthorDate: Wed, 18 Jul 2018 11:40:41 +0200
Committer:  Thomas Gleixner <tglx@...utronix.de>
CommitDate: Fri, 20 Jul 2018 01:11:36 +0200

x86/entry/32: Put ESPFIX code into a macro

This makes it easier to split up the shared iret code path.

Signed-off-by: Joerg Roedel <jroedel@...e.de>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Tested-by: Pavel Machek <pavel@....cz>
Cc: "H . Peter Anvin" <hpa@...or.com>
Cc: linux-mm@...ck.org
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Andy Lutomirski <luto@...nel.org>
Cc: Dave Hansen <dave.hansen@...el.com>
Cc: Josh Poimboeuf <jpoimboe@...hat.com>
Cc: Juergen Gross <jgross@...e.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Jiri Kosina <jkosina@...e.cz>
Cc: Boris Ostrovsky <boris.ostrovsky@...cle.com>
Cc: Brian Gerst <brgerst@...il.com>
Cc: David Laight <David.Laight@...lab.com>
Cc: Denys Vlasenko <dvlasenk@...hat.com>
Cc: Eduardo Valentin <eduval@...zon.com>
Cc: Greg KH <gregkh@...uxfoundation.org>
Cc: Will Deacon <will.deacon@....com>
Cc: aliguori@...zon.com
Cc: daniel.gruss@...k.tugraz.at
Cc: hughd@...gle.com
Cc: keescook@...gle.com
Cc: Andrea Arcangeli <aarcange@...hat.com>
Cc: Waiman Long <llong@...hat.com>
Cc: "David H . Gutteridge" <dhgutteridge@...patico.ca>
Cc: joro@...tes.org
Link: https://lkml.kernel.org/r/1531906876-13451-5-git-send-email-joro@8bytes.org

---
 arch/x86/entry/entry_32.S | 97 ++++++++++++++++++++++++-----------------------
 1 file changed, 49 insertions(+), 48 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 39f711ad0fc9..ef7d65315e8e 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -221,6 +221,54 @@
 	POP_GS_EX
 .endm
 
+.macro CHECK_AND_APPLY_ESPFIX
+#ifdef CONFIG_X86_ESPFIX32
+#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
+
+	ALTERNATIVE	"jmp .Lend_\@", "", X86_BUG_ESPFIX
+
+	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
+	/*
+	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
+	 * are returning to the kernel.
+	 * See comments in process.c:copy_thread() for details.
+	 */
+	movb	PT_OLDSS(%esp), %ah
+	movb	PT_CS(%esp), %al
+	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
+	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
+	jne	.Lend_\@	# returning to user-space with LDT SS
+
+	/*
+	 * Setup and switch to ESPFIX stack
+	 *
+	 * We're returning to userspace with a 16 bit stack. The CPU will not
+	 * restore the high word of ESP for us on executing iret... This is an
+	 * "official" bug of all the x86-compatible CPUs, which we can work
+	 * around to make dosemu and wine happy. We do this by preloading the
+	 * high word of ESP with the high word of the userspace ESP while
+	 * compensating for the offset by changing to the ESPFIX segment with
+	 * a base address that matches for the difference.
+	 */
+	mov	%esp, %edx			/* load kernel esp */
+	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
+	mov	%dx, %ax			/* eax: new kernel esp */
+	sub	%eax, %edx			/* offset (low word is 0) */
+	shr	$16, %edx
+	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
+	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
+	pushl	$__ESPFIX_SS
+	pushl	%eax				/* new kernel esp */
+	/*
+	 * Disable interrupts, but do not irqtrace this section: we
+	 * will soon execute iret and the tracer was already set to
+	 * the irqstate after the IRET:
+	 */
+	DISABLE_INTERRUPTS(CLBR_ANY)
+	lss	(%esp), %esp			/* switch to espfix segment */
+.Lend_\@:
+#endif /* CONFIG_X86_ESPFIX32 */
+.endm
 /*
  * %eax: prev task
  * %edx: next task
@@ -547,21 +595,7 @@ ENTRY(entry_INT80_32)
 restore_all:
 	TRACE_IRQS_IRET
 .Lrestore_all_notrace:
-#ifdef CONFIG_X86_ESPFIX32
-	ALTERNATIVE	"jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX
-
-	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
-	/*
-	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
-	 * are returning to the kernel.
-	 * See comments in process.c:copy_thread() for details.
-	 */
-	movb	PT_OLDSS(%esp), %ah
-	movb	PT_CS(%esp), %al
-	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
-	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
-	je .Lldt_ss				# returning to user-space with LDT SS
-#endif
+	CHECK_AND_APPLY_ESPFIX
 .Lrestore_nocheck:
 	RESTORE_REGS 4				# skip orig_eax/error_code
 .Lirq_return:
@@ -579,39 +613,6 @@ ENTRY(iret_exc	)
 	jmp	common_exception
 .previous
 	_ASM_EXTABLE(.Lirq_return, iret_exc)
-
-#ifdef CONFIG_X86_ESPFIX32
-.Lldt_ss:
-/*
- * Setup and switch to ESPFIX stack
- *
- * We're returning to userspace with a 16 bit stack. The CPU will not
- * restore the high word of ESP for us on executing iret... This is an
- * "official" bug of all the x86-compatible CPUs, which we can work
- * around to make dosemu and wine happy. We do this by preloading the
- * high word of ESP with the high word of the userspace ESP while
- * compensating for the offset by changing to the ESPFIX segment with
- * a base address that matches for the difference.
- */
-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
-	mov	%esp, %edx			/* load kernel esp */
-	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
-	mov	%dx, %ax			/* eax: new kernel esp */
-	sub	%eax, %edx			/* offset (low word is 0) */
-	shr	$16, %edx
-	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
-	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
-	pushl	$__ESPFIX_SS
-	pushl	%eax				/* new kernel esp */
-	/*
-	 * Disable interrupts, but do not irqtrace this section: we
-	 * will soon execute iret and the tracer was already set to
-	 * the irqstate after the IRET:
-	 */
-	DISABLE_INTERRUPTS(CLBR_ANY)
-	lss	(%esp), %esp			/* switch to espfix segment */
-	jmp	.Lrestore_nocheck
-#endif
 ENDPROC(entry_INT80_32)
 
 .macro FIXUP_ESPFIX_STACK

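[Annotation, not part of the patch] The address arithmetic that the new
CHECK_AND_APPLY_ESPFIX macro performs before the lss can be modelled in a few
lines of C. The sketch below is only an illustration with made-up example
stack values; it is not kernel code and uses no kernel API. The authoritative
sequence is the assembly in the first hunk above.

/*
 * User-space model of the ESPFIX fixup done in CHECK_AND_APPLY_ESPFIX.
 * The stack values are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t kernel_esp = 0xc1a3f7d8;	/* example kernel stack pointer */
	uint32_t user_esp   = 0xbfff1234;	/* example userspace ESP        */

	/* mov %dx, %ax: keep the user high word, take the kernel low word */
	uint32_t new_esp = (user_esp & 0xffff0000u) | (kernel_esp & 0x0000ffffu);

	/*
	 * sub %eax, %edx; shr $16, %edx: the low words cancel, leaving the
	 * high 16 bits that get patched into the GDT_ESPFIX_SS base.
	 */
	uint32_t base = kernel_esp - new_esp;

	/* lss (%esp), %esp: kernel stack accesses still reach the real stack */
	printf("base + new_esp = %#x (kernel_esp = %#x)\n",
	       (unsigned)(base + new_esp), (unsigned)kernel_esp);

	/*
	 * iret to the 16-bit stack segment only restores SP, so the high
	 * word of ESP stays at the value preloaded from user_esp.
	 */
	printf("preloaded high word = %#x (user high word = %#x)\n",
	       (unsigned)(new_esp & 0xffff0000u),
	       (unsigned)(user_esp & 0xffff0000u));
	return 0;
}

In other words, the ESPFIX segment base plus the new ESP still resolves to the
real kernel stack for the remaining pops, while the preloaded high word of ESP
survives the 16-bit iret back to userspace.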