[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20200527073107.2127-4-laijs@linux.alibaba.com>
Date: Wed, 27 May 2020 07:31:05 +0000
From: Lai Jiangshan <laijs@...ux.alibaba.com>
To: linux-kernel@...r.kernel.org
Cc: Lai Jiangshan <laijs@...ux.alibaba.com>,
Andy Lutomirski <luto@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org, "H. Peter Anvin" <hpa@...or.com>,
Peter Zijlstra <peterz@...radead.org>,
Alexandre Chartre <alexandre.chartre@...cle.com>,
"Eric W. Biederman" <ebiederm@...ssion.com>,
Jann Horn <jannh@...gle.com>,
Dave Hansen <dave.hansen@...ux.intel.com>
Subject: [PATCH 3/5] x86/entry: directly switch to kernel stack when .Lerror_bad_iret
Directly copy pt_regs to kernel stack when .Lerror_bad_iret.
Directly switch to kernel stack when .Lerror_bad_iret.
We can see that entry_64.S does the following things back to back
when .Lerror_bad_iret:
call fixup_bad_iret(), switch to sp0 stack with pt_regs copied
call sync_regs(), switch to kernel stack with pt_regs copied
So we can do all these things together in fixup_bad_iret().
After this patch, fixup_bad_iret() is restored to the behavior before
commit 7f2590a110b8 ("x86/entry/64: Use a per-CPU trampoline stack for IDT entries")
Signed-off-by: Lai Jiangshan <laijs@...ux.alibaba.com>
---
arch/x86/entry/entry_64.S | 13 ++-----------
arch/x86/kernel/traps.c | 9 ++++-----
2 files changed, 6 insertions(+), 16 deletions(-)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index e8817ae31390..c5db048e5bed 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1329,16 +1329,6 @@ SYM_CODE_START_LOCAL(error_entry)
ENCODE_FRAME_POINTER 8
ret
-.Lerror_entry_from_usermode_after_swapgs:
- /* Put us onto the real thread stack. */
- popq %r12 /* save return addr in %12 */
- movq %rsp, %rdi /* arg0 = pt_regs pointer */
- call sync_regs
- movq %rax, %rsp /* switch stack */
- ENCODE_FRAME_POINTER
- pushq %r12
- ret
-
.Lerror_entry_done_lfence:
FENCE_SWAPGS_KERNEL_ENTRY
.Lerror_entry_done:
@@ -1392,7 +1382,8 @@ SYM_CODE_START_LOCAL(error_entry)
mov %rsp, %rdi
call fixup_bad_iret
mov %rax, %rsp
- jmp .Lerror_entry_from_usermode_after_swapgs
+ ENCODE_FRAME_POINTER 8
+ ret
SYM_CODE_END(error_entry)
SYM_CODE_START_LOCAL(error_exit)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 9e5d81cb94ba..3bef95934644 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -666,13 +666,12 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
/*
* This is called from entry_64.S early in handling a fault
* caused by a bad iret to user mode. To handle the fault
- * correctly, we want to move our stack frame to where it would
- * be had we entered directly on the entry stack (rather than
- * just below the IRET frame) and we want to pretend that the
- * exception came from the IRET target.
+ * correctly, we want to move our stack frame to kernel stack
+ * (rather than just below the IRET frame) and we want to
+ * pretend that the exception came from the IRET target.
*/
struct bad_iret_stack tmp, *new_stack =
- (struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
+ (struct bad_iret_stack *)__this_cpu_read(cpu_current_top_of_stack) - 1;
/* Copy the IRET target to the temporary storage. */
memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
--
2.20.1
Powered by blists - more mailing lists