Message-Id: <20191127202656.187310680@linuxfoundation.org>
Date: Wed, 27 Nov 2019 21:32:17 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Andy Lutomirski <luto@...nel.org>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>, stable@...nel.org
Subject: [PATCH 5.4 22/66] x86/entry/32: Unwind the ESPFIX stack earlier on exception entry

From: Andy Lutomirski <luto@...nel.org>

commit a1a338e5b6fe9e0a39c57c232dc96c198bb53e47 upstream.

Right now, we do some fancy parts of the exception entry path while SS
might have a nonzero base: we fill in regs->ss and regs->sp, and we
consider switching to the kernel stack. This results in regs->ss and
regs->sp referring to a non-flat stack and it may result in
overflowing the entry stack. The former issue means that we can try to
call iret_exc on a non-flat stack, which doesn't work.

Tested with selftests/x86/sigreturn_32.

Fixes: 45d7b255747c ("x86/entry/32: Enter the kernel via trampoline stack")
Signed-off-by: Andy Lutomirski <luto@...nel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: stable@...nel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
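As a rough illustration (not the actual sigreturn_32 selftest; the selector
math, descriptor fields, and 32-bit-only build are assumptions of this sketch),
the following userspace snippet installs an LDT data segment via modify_ldt()
and briefly runs with it loaded into %ss, which is the situation that sends
kernel exits through the ESPFIX machinery touched below:

#define _GNU_SOURCE
#include <asm/ldt.h>          /* struct user_desc */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc desc;
	unsigned short sel = (0 << 3) | 4 | 3;	/* LDT entry 0, TI=1, RPL=3 */

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = 0;
	desc.base_addr    = 0;
	desc.limit        = 0xffff;	/* 64 KiB limit */
	desc.seg_32bit    = 0;		/* 16-bit data segment */
	desc.contents     = 0;		/* read/write data */

	/* func 1 = write an LDT entry */
	if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) != 0) {
		perror("modify_ldt");
		return 1;
	}

	/*
	 * Run for a moment with the LDT selector in %ss.  Any interrupt or
	 * exception taken here has to return to userspace through the
	 * ESPFIX trampoline; %dx is used as scratch to stash the flat SS.
	 */
	asm volatile ("mov %%ss, %%dx\n\t"
		      "mov %0, %%ss\n\t"
		      "mov %%dx, %%ss"
		      : : "r" (sel) : "dx");

	puts("survived a round trip with an LDT-based SS");
	return 0;
}
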
arch/x86/entry/entry_32.S | 30 ++++++++++++++++--------------
1 file changed, 16 insertions(+), 14 deletions(-)

--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -210,8 +210,6 @@
/*
* The high bits of the CS dword (__csh) are used for CS_FROM_*.
* Clear them in case hardware didn't do this for us.
- *
- * Be careful: we may have nonzero SS base due to ESPFIX.
*/
andl $0x0000ffff, 4*4(%esp)
@@ -307,12 +305,21 @@
.Lfinished_frame_\@:
.endm
-.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0
+.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
cld
.if \skip_gs == 0
PUSH_GS
.endif
pushl %fs
+
+ pushl %eax
+ movl $(__KERNEL_PERCPU), %eax
+ movl %eax, %fs
+.if \unwind_espfix > 0
+ UNWIND_ESPFIX_STACK
+.endif
+ popl %eax
+
FIXUP_FRAME
pushl %es
pushl %ds
@@ -326,8 +333,6 @@
movl $(__USER_DS), %edx
movl %edx, %ds
movl %edx, %es
- movl $(__KERNEL_PERCPU), %edx
- movl %edx, %fs
.if \skip_gs == 0
SET_KERNEL_GS %edx
.endif
@@ -1153,18 +1158,17 @@ ENDPROC(entry_INT80_32)
lss (%esp), %esp /* switch to the normal stack segment */
#endif
.endm
+
.macro UNWIND_ESPFIX_STACK
+ /* It's safe to clobber %eax, all other regs need to be preserved */
#ifdef CONFIG_X86_ESPFIX32
movl %ss, %eax
/* see if on espfix stack */
cmpw $__ESPFIX_SS, %ax
- jne 27f
- movl $__KERNEL_DS, %eax
- movl %eax, %ds
- movl %eax, %es
+ jne .Lno_fixup_\@
/* switch to normal stack */
FIXUP_ESPFIX_STACK
-27:
+.Lno_fixup_\@:
#endif
.endm
@@ -1458,10 +1462,9 @@ END(page_fault)
common_exception_read_cr2:
/* the function address is in %gs's slot on the stack */
- SAVE_ALL switch_stacks=1 skip_gs=1
+ SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
ENCODE_FRAME_POINTER
- UNWIND_ESPFIX_STACK
/* fixup %gs */
GS_TO_REG %ecx
@@ -1483,9 +1486,8 @@ END(common_exception_read_cr2)
common_exception:
/* the function address is in %gs's slot on the stack */
- SAVE_ALL switch_stacks=1 skip_gs=1
+ SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
ENCODE_FRAME_POINTER
- UNWIND_ESPFIX_STACK
/* fixup %gs */
GS_TO_REG %ecx