Message-Id: <20201109112319.264511-15-alexandre.chartre@oracle.com>
Date: Mon, 9 Nov 2020 12:23:09 +0100
From: Alexandre Chartre <alexandre.chartre@...cle.com>
To: "tglx@...utronix.de"@userv0122.oracle.com,
"mingo@...hat.com"@userv0122.oracle.com,
"bp@...en8.de"@userv0122.oracle.com,
"hpa@...or.com"@userv0122.oracle.com,
"x86@...nel.org"@userv0122.oracle.com,
"dave.hansen@...ux.intel.com"@userv0122.oracle.com,
"luto@...nel.org"@userv0122.oracle.com,
"peterz@...radead.org"@userv0122.oracle.com,
"linux-kernel@...r.kernel.org"@userv0122.oracle.com,
"thomas.lendacky@....com"@userv0122.oracle.com,
"jroedel@...e.de"@userv0122.oracle.com
Cc: "konrad.wilk@...cle.com"@userv0122.oracle.com,
"jan.setjeeilers@...cle.com"@userv0122.oracle.com,
"junaids@...gle.com"@userv0122.oracle.com,
"oweisse@...gle.com"@userv0122.oracle.com,
"rppt@...ux.vnet.ibm.com"@userv0122.oracle.com,
"graf@...zon.de"@userv0122.oracle.com,
"mgross@...ux.intel.com"@userv0122.oracle.com,
"kuzuno@...il.com"@userv0122.oracle.com,
"alexandre.chartre@...cle.com"@userv0122.oracle.com
Subject: [RFC][PATCH 14/24] x86/pti: Use PTI stack instead of trampoline stack
When entering the kernel from userland, use the per-task PTI stack
instead of the per-cpu trampoline stack. Like the trampoline stack,
the PTI stack is mapped both in the kernel and in the user page-table.
Using a per-task stack which is mapped into the kernel and the user
page-table instead of a per-cpu stack will allow executing more code
before switching to the kernel stack and to the kernel page-table.
Additional changes will be made later to switch to the kernel stack
(which is only mapped in the kernel page-table).
Signed-off-by: Alexandre Chartre <alexandre.chartre@...cle.com>
---
arch/x86/entry/entry_64.S | 42 +++++++++-----------------------
arch/x86/include/asm/pti.h | 8 ++++++
arch/x86/include/asm/switch_to.h | 7 +++++-
3 files changed, 26 insertions(+), 31 deletions(-)
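
Not part of the commit message, just an illustration for reviewers: the
stack selection this patch converges on is the x86-64 half of
update_task_stack() from the switch_to.h hunk below, reduced to a sketch
(pti_enabled(), load_sp0() and task_top_of_stack() are the existing
helpers used there):

        /*
         * Sketch only: with PTI enabled, TSS.sp0 is loaded with the top
         * of the task stack, so entry from userland (which picks up its
         * stack pointer from TSS.sp0) starts on the per-task PTI stack,
         * mapped in both page-tables, instead of the per-cpu trampoline
         * stack.  Xen PV already programmed sp0 this way.
         */
        static inline void update_task_stack(struct task_struct *task)
        {
                if (static_cpu_has(X86_FEATURE_XENPV) || pti_enabled())
                        load_sp0(task_top_of_stack(task));
        }
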
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 458af12ed9a1..29beab46bedd 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -194,19 +194,9 @@ syscall_return_via_sysret:
/* rcx and r11 are already restored (see code above) */
POP_REGS pop_rdi=0 skip_r11rcx=1
- /*
- * Now all regs are restored except RSP and RDI.
- * Save old stack pointer and switch to trampoline stack.
- */
- movq %rsp, %rdi
- movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
- UNWIND_HINT_EMPTY
-
- pushq RSP-RDI(%rdi) /* RSP */
- pushq (%rdi) /* RDI */
-
/*
* We are on the trampoline stack. All regs except RDI are live.
+ * We are on the trampoline stack. All regs except RSP are live.
* We can do future final exit work right here.
*/
STACKLEAK_ERASE_NOCLOBBER
@@ -214,7 +204,7 @@ syscall_return_via_sysret:
SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
popq %rdi
- popq %rsp
+ movq RSP-ORIG_RAX(%rsp), %rsp
USERGS_SYSRET64
SYM_CODE_END(entry_SYSCALL_64)
@@ -606,24 +596,6 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
#endif
POP_REGS pop_rdi=0
- /*
- * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
- * Save old stack pointer and switch to trampoline stack.
- */
- movq %rsp, %rdi
- movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
- UNWIND_HINT_EMPTY
-
- /* Copy the IRET frame to the trampoline stack. */
- pushq 6*8(%rdi) /* SS */
- pushq 5*8(%rdi) /* RSP */
- pushq 4*8(%rdi) /* EFLAGS */
- pushq 3*8(%rdi) /* CS */
- pushq 2*8(%rdi) /* RIP */
-
- /* Push user RDI on the trampoline stack. */
- pushq (%rdi)
-
/*
* We are on the trampoline stack. All regs except RDI are live.
* We can do future final exit work right here.
@@ -634,6 +606,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
/* Restore RDI. */
popq %rdi
+ addq $8, %rsp /* skip regs->orig_ax */
SWAPGS
INTERRUPT_RETURN
@@ -1062,6 +1035,15 @@ SYM_CODE_START_LOCAL(error_entry)
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
.Lerror_entry_from_usermode_after_swapgs:
+ /*
+ * We are on the trampoline stack. With PTI, the trampoline
+ * stack is a per-thread stack so we are all set and we can
+ * return.
+ *
+ * Without PTI, the trampoline stack is a per-cpu stack and
+ * we need to switch to the normal thread stack.
+ */
+ ALTERNATIVE "", "ret", X86_FEATURE_PTI
/* Put us onto the real thread stack. */
popq %r12 /* save return addr in %12 */
movq %rsp, %rdi /* arg0 = pt_regs pointer */
diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h
index 5484e69ff8d3..ed211fcc3a50 100644
--- a/arch/x86/include/asm/pti.h
+++ b/arch/x86/include/asm/pti.h
@@ -17,8 +17,16 @@ extern void pti_check_boottime_disable(void);
extern void pti_finalize(void);
extern void pti_clone_pgtable(struct mm_struct *mm, unsigned long start,
unsigned long end, enum pti_clone_level level);
+static inline bool pti_enabled(void)
+{
+ return static_cpu_has(X86_FEATURE_PTI);
+}
#else
static inline void pti_check_boottime_disable(void) { }
+static inline bool pti_enabled(void)
+{
+ return false;
+}
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 9f69cc497f4b..457458228462 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -3,6 +3,7 @@
#define _ASM_X86_SWITCH_TO_H
#include <linux/sched/task_stack.h>
+#include <asm/pti.h>
struct task_struct; /* one of the stranger aspects of C forward declarations */
@@ -76,8 +77,12 @@ static inline void update_task_stack(struct task_struct *task)
* doesn't work on x86-32 because sp1 and
* cpu_current_top_of_stack have different values (because of
* the non-zero stack-padding on 32bit).
+ *
+ * If PTI is enabled, sp0 points to the PTI stack (mapped in
+ * the kernel and user page-table) which is used when entering
+ * the kernel.
*/
- if (static_cpu_has(X86_FEATURE_XENPV))
+ if (static_cpu_has(X86_FEATURE_XENPV) || pti_enabled())
load_sp0(task_top_of_stack(task));
#endif
}
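
A note on the exit-path arithmetic, since it is easy to miss: after
POP_REGS pop_rdi=0 and the final popq %rdi, %rsp points at regs->orig_ax
on the PTI stack, and the saved user stack pointer sits four 8-byte slots
higher (orig_ax, ip, cs, flags, then sp).  That is what
"movq RSP-ORIG_RAX(%rsp), %rsp" reads in the sysret path; the iret path
instead skips orig_ax and lets IRET consume the frame in place, which only
works because the PTI stack remains mapped after the CR3 switch.  As an
illustration only (not part of the patch), that layout assumption could be
spelled out as a compile-time check:

        #include <linux/build_bug.h>
        #include <linux/stddef.h>
        #include <asm/ptrace.h>

        /* regs->sp is 4 slots (orig_ax, ip, cs, flags) above regs->orig_ax. */
        static_assert(offsetof(struct pt_regs, sp) -
                      offsetof(struct pt_regs, orig_ax) == 4 * 8);
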
--
2.18.4