Message-ID: <20171126145553.67lipggq7cdihuhw@gmail.com>
Date: Sun, 26 Nov 2017 15:55:53 +0100
From: Ingo Molnar <mingo@...nel.org>
To: Borislav Petkov <bp@...en8.de>
Cc: linux-kernel@...r.kernel.org,
Dave Hansen <dave.hansen@...ux.intel.com>,
Andy Lutomirski <luto@...capital.net>,
Thomas Gleixner <tglx@...utronix.de>,
"H . Peter Anvin" <hpa@...or.com>,
Peter Zijlstra <peterz@...radead.org>,
Linus Torvalds <torvalds@...ux-foundation.org>
Subject: [PATCH v2] x86/mm/kaiser: Prepare the x86/entry assembly code for entry/exit CR3 switching

* Borislav Petkov <bp@...en8.de> wrote:
> On Fri, Nov 24, 2017 at 06:23:50PM +0100, Ingo Molnar wrote:
> > diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
> > index 3fd8bc560fae..e1650da01323 100644
> > --- a/arch/x86/entry/calling.h
> > +++ b/arch/x86/entry/calling.h
> > @@ -1,6 +1,7 @@
> > /* SPDX-License-Identifier: GPL-2.0 */
> > #include <linux/jump_label.h>
> > #include <asm/unwind_hints.h>
> > +#include <asm/cpufeatures.h>
> >
> > /*
> >
> > @@ -187,6 +188,70 @@ For 32-bit we have the following conventions - kernel is built with
> > #endif
> > .endm
> >
> > +#ifdef CONFIG_KAISER
> > +
> > +/* KAISER PGDs are 8k. Flip bit 12 to switch between the two halves: */
> > +#define KAISER_SWITCH_MASK (1<<PAGE_SHIFT)
>
> Btw, entry_64.o doesn't build at this point in the series if you force-enable
> CONFIG_KAISER by doing
>
> #define CONFIG_KAISER
>
> above it.
>
> arch/x86/entry/entry_64.S: Assembler messages:
> arch/x86/entry/entry_64.S:210: Error: invalid operands (*ABS* and *UND* sections) for `<<'
> arch/x86/entry/entry_64.S:406: Error: invalid operands (*ABS* and *UND* sections) for `<<'
> arch/x86/entry/entry_64.S:743: Error: invalid operands (*ABS* and *UND* sections) for `<<'
> arch/x86/entry/entry_64.S:955: Error: invalid operands (*ABS* and *UND* sections) for `<<'
> ...
>
> due to the missing PAGE_SHIFT definition in asm.
>
> I'm assuming that is resolved later - not that it breaks bisection...
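
Side note: assuming <asm/page_types.h> is safe to pull into calling.h,
an (untested) extra include next to the cpufeatures.h one should give
the assembler a PAGE_SHIFT definition:

	 #include <asm/cpufeatures.h>
	+#include <asm/page_types.h>
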
>
> > +.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
> > + movq %cr3, %r\scratch_reg
> > + movq %r\scratch_reg, \save_reg
>
> What happened to making it uniform so that that macro can be invoked
> like this:
>
> SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax ...
>
> instead of "splitting" the arg?
>
> IOW, hunk below builds here, and asm looks correct:
>
> 14bf: 31 db xor %ebx,%ebx
> 14c1: 0f 20 d8 mov %cr3,%rax
> 14c4: 49 89 c6 mov %rax,%r14
> 14c7: 48 a9 00 10 00 00 test $0x1000,%rax
> 14cd: 74 09 je 14d8 <paranoid_entry+0x78>
> 14cf: 48 25 ff ef ff ff and $0xffffffffffffefff,%rax
> 14d5: 0f 22 d8 mov %rax,%cr3
>
> ---
> diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
> index e1650da01323..d528f7060774 100644
> --- a/arch/x86/entry/calling.h
> +++ b/arch/x86/entry/calling.h
> @@ -188,10 +188,12 @@ For 32-bit we have the following conventions - kernel is built with
> #endif
> .endm
>
> +#define CONFIG_KAISER
> +
> #ifdef CONFIG_KAISER
>
> /* KAISER PGDs are 8k. Flip bit 12 to switch between the two halves: */
> -#define KAISER_SWITCH_MASK (1<<PAGE_SHIFT)
> +#define KAISER_SWITCH_MASK (1<<12)
>
> .macro ADJUST_KERNEL_CR3 reg:req
> /* Clear "KAISER bit", point CR3 at kernel pagetables: */
> @@ -216,17 +218,17 @@ For 32-bit we have the following conventions - kernel is built with
> .endm
>
> .macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
> - movq %cr3, %r\scratch_reg
> - movq %r\scratch_reg, \save_reg
> + movq %cr3, \scratch_reg
> + movq \scratch_reg, \save_reg
> /*
> * Is the switch bit zero? This means the address is
> * up in real KAISER patches in a moment.
> */
> - testq $(KAISER_SWITCH_MASK), %r\scratch_reg
> + testq $(KAISER_SWITCH_MASK), \scratch_reg
> jz .Ldone_\@
>
> - ADJUST_KERNEL_CR3 %r\scratch_reg
> - movq %r\scratch_reg, %cr3
> + ADJUST_KERNEL_CR3 \scratch_reg
> + movq \scratch_reg, %cr3
>
> .Ldone_\@:
> .endm
> diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
> index 4ac952080869..5a15d0852b2f 100644
> --- a/arch/x86/entry/entry_64.S
> +++ b/arch/x86/entry/entry_64.S
> @@ -1256,7 +1256,7 @@ ENTRY(paranoid_entry)
> xorl %ebx, %ebx
>
> 1:
> - SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=ax save_reg=%r14
> + SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
>
> ret
> END(paranoid_entry)
>
> > diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
> > index 34e3110b0876..4ac952080869 100644
> > --- a/arch/x86/entry/entry_64.S
> > +++ b/arch/x86/entry/entry_64.S
> > @@ -168,6 +168,9 @@ ENTRY(entry_SYSCALL_64_trampoline)
> > /* Stash the user RSP. */
> > movq %rsp, RSP_SCRATCH
> >
> > + /* Note: using %rsp as a scratch reg. */
>
> Haha, yap, it just got freed :)
>
> > + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
> > +
> > /* Load the top of the task stack into RSP */
> > movq CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp
> >
> > @@ -198,6 +201,13 @@ ENTRY(entry_SYSCALL_64)
> >
> > swapgs
> > movq %rsp, PER_CPU_VAR(rsp_scratch)
>
> <---- newline here.
>
> > + /*
> > + * The kernel CR3 is needed to map the process stack, but we
> > + * need a scratch register to be able to load CR3. %rsp is
> > + * clobberable right now, so use it as a scratch register.
> > + * %rsp will be look crazy here for a couple instructions.
>
> s/be // or "will be looking crazy" :-)
>
> > + */
> > + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
>
> Now, this is questionable: we did enter through the trampoline
> entry_SYSCALL_64_trampoline, so theoretically we wouldn't need to switch
> CR3 here again because, well, we did already.
> 
> I.e., entry_SYSCALL_64 is not going to be called anymore - unless we
> jump to it when we decide to bypass the trampolines in the
> KAISER-disabled case. Just pointing it out here so that we don't forget
> to deal with this...
>
> > @@ -1239,7 +1254,11 @@ ENTRY(paranoid_entry)
> > js 1f /* negative -> in kernel */
> > SWAPGS
> > xorl %ebx, %ebx
> > -1: ret
> > +
> > +1:
> > + SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=ax save_reg=%r14
> > +
> > + ret
> > END(paranoid_entry)
> >
> > /*
> > @@ -1261,6 +1280,7 @@ ENTRY(paranoid_exit)
> > testl %ebx, %ebx /* swapgs needed? */
> > jnz .Lparanoid_exit_no_swapgs
> > TRACE_IRQS_IRETQ
> > + RESTORE_CR3 %r14
>
> RESTORE_CR3 save_reg=%r14
>
> like the other invocation below.
>
> But if the runtime disable gets changed to a boottime one, you don't
> need that macro anymore.

Ok, I've incorporated all these fixes into the latest version - attached below.

[ Also added your Reviewed-by tag, to save a step in the best-case scenario ;-) ]

Thanks,

	Ingo

===================>
Subject: x86/mm/kaiser: Prepare the x86/entry assembly code for entry/exit CR3 switching
From: Dave Hansen <dave.hansen@...ux.intel.com>
Date: Wed, 22 Nov 2017 16:34:42 -0800

From: Dave Hansen <dave.hansen@...ux.intel.com>

This is largely code from Andy Lutomirski. I fixed a few bugs
in it, and added a few SWITCH_TO_* spots.

KAISER needs to switch to a different CR3 value when it enters
the kernel and switch back when it exits. This essentially
needs to be done before leaving assembly code.

This is extra challenging because the switching context is
tricky: the set of registers that can be clobbered varies, and
it is hard to store anything on the stack, because either an
established ABI (ptregs) is already in place or the stack is
entirely unsafe to use.

This patch establishes a set of macros that allow switching
between the user and kernel CR3 values.
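
For illustration - with made-up example values - the kernel and user
PGDs sit in a single 8k allocation, so the two CR3 values only ever
differ in bit 12:

	kernel page tables:	CR3 = 0x13a6e000	(bit 12 clear)
	user   page tables:	CR3 = 0x13a6f000	(bit 12 set)

ORing bit 12 into CR3 selects the user half, ANDing it out selects
the kernel half again.
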
Interactions with SWAPGS: previous versions of the KAISER code
relied on having per-CPU scratch space to save/restore a register
that can be used for the CR3 MOV. The %GS register is used to
index into our per-CPU space, so SWAPGS *had* to be done before
the CR3 switch. That scratch space is gone now, but the semantic
that SWAPGS must be done before the CR3 MOV is retained. This is
good to keep because it is not that hard to do and it allows us
to do things like add per-CPU debugging information to help us
figure out what goes wrong sometimes.
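
The resulting pattern in the entry paths is (simplified sketch, not a
literal excerpt from the patch):

	SWAPGS					/* %gs now indexes kernel per-CPU data */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax	/* kernel page tables from here on */
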
What this does in the NMI code is worth pointing out. NMIs
can interrupt *any* context and they can also be nested with
NMIs interrupting other NMIs. The comments below
".Lnmi_from_kernel" explain the format of the stack during this
situation. Changing the format of this stack is not a fun
exercise: I tried. Instead of storing the old CR3 value on the
stack, this patch depends on the *regular* register save/restore
mechanism and uses %r14 to hold the CR3 value during the NMI. It is
callee-saved and will not be clobbered by the C NMI handlers that
get called.
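
In simplified form, the save/restore pairing in the paranoid/NMI paths
looks like this (sketch, not a literal excerpt):

	paranoid_entry:
		SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=ax save_reg=%r14

	/* ... C handlers run on the kernel CR3, %r14 is callee-saved ... */

	paranoid_exit / end_repeat_nmi:
		RESTORE_CR3 save_reg=%r14
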
Signed-off-by: Dave Hansen <dave.hansen@...ux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Reviewed-by: Thomas Gleixner <tglx@...utronix.de>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: daniel.gruss@...k.tugraz.at
Cc: hughd@...gle.com
Cc: keescook@...gle.com
Cc: linux-mm@...ck.org
Cc: luto@...nel.org
Cc: michael.schwarz@...k.tugraz.at
Cc: moritz.lipp@...k.tugraz.at
Cc: richard.fellner@...dent.tugraz.at
Link: https://lkml.kernel.org/r/20171123003442.2D047A7D@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
arch/x86/entry/calling.h | 65 +++++++++++++++++++++++++++++++++++++++
arch/x86/entry/entry_64.S | 38 +++++++++++++++++++++-
arch/x86/entry/entry_64_compat.S | 24 +++++++++++++-
3 files changed, 123 insertions(+), 4 deletions(-)
Index: tip/arch/x86/entry/calling.h
===================================================================
--- tip.orig/arch/x86/entry/calling.h
+++ tip/arch/x86/entry/calling.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
+#include <asm/cpufeatures.h>
/*
@@ -187,6 +188,70 @@ For 32-bit we have the following convent
#endif
.endm
+#ifdef CONFIG_KAISER
+
+/* KAISER PGDs are 8k. Flip bit 12 to switch between the two halves: */
+#define KAISER_SWITCH_MASK (1<<PAGE_SHIFT)
+
+.macro ADJUST_KERNEL_CR3 reg:req
+ /* Clear "KAISER bit", point CR3 at kernel pagetables: */
+ andq $(~KAISER_SWITCH_MASK), \reg
+.endm
+
+.macro ADJUST_USER_CR3 reg:req
+ /* Move CR3 up a page to the user page tables: */
+ orq $(KAISER_SWITCH_MASK), \reg
+.endm
+
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+ mov %cr3, \scratch_reg
+ ADJUST_KERNEL_CR3 \scratch_reg
+ mov \scratch_reg, %cr3
+.endm
+
+.macro SWITCH_TO_USER_CR3 scratch_reg:req
+ mov %cr3, \scratch_reg
+ ADJUST_USER_CR3 \scratch_reg
+ mov \scratch_reg, %cr3
+.endm
+
+.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
+ movq %cr3, %r\scratch_reg
+ movq %r\scratch_reg, \save_reg
+ /*
+ * Is the switch bit zero? If so, CR3 already points at the
+ * kernel page tables and no switch is needed.
+ */
+ testq $(KAISER_SWITCH_MASK), %r\scratch_reg
+ jz .Ldone_\@
+
+ ADJUST_KERNEL_CR3 %r\scratch_reg
+ movq %r\scratch_reg, %cr3
+
+.Ldone_\@:
+.endm
+
+.macro RESTORE_CR3 save_reg:req
+ /*
+ * The CR3 write could be avoided when not changing its value,
+ * but would require a CR3 read *and* a scratch register.
+ */
+ movq \save_reg, %cr3
+.endm
+
+#else /* CONFIG_KAISER=n: */
+
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+.endm
+.macro SWITCH_TO_USER_CR3 scratch_reg:req
+.endm
+.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
+.endm
+.macro RESTORE_CR3 save_reg:req
+.endm
+
+#endif
+
#endif /* CONFIG_X86_64 */
/*
Index: tip/arch/x86/entry/entry_64.S
===================================================================
--- tip.orig/arch/x86/entry/entry_64.S
+++ tip/arch/x86/entry/entry_64.S
@@ -168,6 +168,9 @@ ENTRY(entry_SYSCALL_64_trampoline)
/* Stash the user RSP. */
movq %rsp, RSP_SCRATCH
+ /* Note: using %rsp as a scratch reg. */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+
/* Load the top of the task stack into RSP */
movq CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp
@@ -198,6 +201,14 @@ ENTRY(entry_SYSCALL_64)
swapgs
movq %rsp, PER_CPU_VAR(rsp_scratch)
+
+ /*
+ * The kernel CR3 is needed to map the process stack, but we
+ * need a scratch register to be able to load CR3. %rsp is
+ * clobberable right now, so use it as a scratch register.
+ * %rsp will look crazy here for a couple instructions.
+ */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
/* Construct struct pt_regs on stack */
@@ -393,6 +404,7 @@ syscall_return_via_sysret:
* We are on the trampoline stack. All regs except RDI are live.
* We can do future final exit work right here.
*/
+ SWITCH_TO_USER_CR3 scratch_reg=%rdi
popq %rdi
popq %rsp
@@ -729,6 +741,8 @@ GLOBAL(swapgs_restore_regs_and_return_to
* We can do future final exit work right here.
*/
+ SWITCH_TO_USER_CR3 scratch_reg=%rdi
+
/* Restore RDI. */
popq %rdi
SWAPGS
@@ -938,6 +952,8 @@ ENTRY(switch_to_thread_stack)
UNWIND_HINT_FUNC
pushq %rdi
+ /* Need to switch before accessing the thread stack. */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
movq %rsp, %rdi
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
@@ -1239,7 +1255,11 @@ ENTRY(paranoid_entry)
js 1f /* negative -> in kernel */
SWAPGS
xorl %ebx, %ebx
-1: ret
+
+1:
+ SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=ax save_reg=%r14
+
+ ret
END(paranoid_entry)
/*
@@ -1261,6 +1281,7 @@ ENTRY(paranoid_exit)
testl %ebx, %ebx /* swapgs needed? */
jnz .Lparanoid_exit_no_swapgs
TRACE_IRQS_IRETQ
+ RESTORE_CR3 save_reg=%r14
SWAPGS_UNSAFE_STACK
jmp .Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
@@ -1288,6 +1309,8 @@ ENTRY(error_entry)
* from user mode due to an IRET fault.
*/
SWAPGS
+ /* We have user CR3. Change to kernel CR3. */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
.Lerror_entry_from_usermode_after_swapgs:
/* Put us onto the real thread stack. */
@@ -1334,6 +1357,7 @@ ENTRY(error_entry)
* .Lgs_change's error handler with kernel gsbase.
*/
SWAPGS
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
jmp .Lerror_entry_done
.Lbstep_iret:
@@ -1343,10 +1367,11 @@ ENTRY(error_entry)
.Lerror_bad_iret:
/*
- * We came from an IRET to user mode, so we have user gsbase.
- * Switch to kernel gsbase:
+ * We came from an IRET to user mode, so we have user
+ * gsbase and CR3. Switch to kernel gsbase and CR3:
*/
SWAPGS
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
/*
* Pretend that the exception came from user mode: set up pt_regs
@@ -1378,6 +1403,10 @@ END(error_exit)
/*
* Runs on exception stack. Xen PV does not go through this path at all,
* so we can use real assembly here.
+ *
+ * Registers:
+ * %r14: Used to save/restore the CR3 of the interrupted context
+ * when KAISER is in use. Do not clobber.
*/
ENTRY(nmi)
UNWIND_HINT_IRET_REGS
@@ -1441,6 +1470,7 @@ ENTRY(nmi)
swapgs
cld
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
movq %rsp, %rdx
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
UNWIND_HINT_IRET_REGS base=%rdx offset=8
@@ -1693,6 +1723,8 @@ end_repeat_nmi:
movq $-1, %rsi
call do_nmi
+ RESTORE_CR3 save_reg=%r14
+
testl %ebx, %ebx /* swapgs needed? */
jnz nmi_restore
nmi_swapgs:
Index: tip/arch/x86/entry/entry_64_compat.S
===================================================================
--- tip.orig/arch/x86/entry/entry_64_compat.S
+++ tip/arch/x86/entry/entry_64_compat.S
@@ -49,6 +49,10 @@
ENTRY(entry_SYSENTER_compat)
/* Interrupts are off on entry. */
SWAPGS
+
+ /* We are about to clobber %rsp anyway, clobbering here is OK */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
/*
@@ -216,6 +220,12 @@ GLOBAL(entry_SYSCALL_compat_after_hwfram
pushq $0 /* pt_regs->r15 = 0 */
/*
+ * We just saved %rdi so it is safe to clobber. It is not
+ * preserved during the C calls inside TRACE_IRQS_OFF anyway.
+ */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+
+ /*
* User mode is traced as though IRQs are on, and SYSENTER
* turned them off.
*/
@@ -256,10 +266,22 @@ sysret32_from_system_call:
* when the system call started, which is already known to user
* code. We zero R8-R10 to avoid info leaks.
*/
+ movq RSP-ORIG_RAX(%rsp), %rsp
+
+ /*
+ * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored
+ * on the process stack which is not mapped to userspace and
+ * not readable after we SWITCH_TO_USER_CR3. Delay the CR3
+ * switch until after after the last reference to the process
+ * stack.
+ *
+ * %r8 is zeroed before the sysret, thus safe to clobber.
+ */
+ SWITCH_TO_USER_CR3 scratch_reg=%r8
+
xorq %r8, %r8
xorq %r9, %r9
xorq %r10, %r10
- movq RSP-ORIG_RAX(%rsp), %rsp
swapgs
sysretl
END(entry_SYSCALL_compat)