Message-Id: <20171108194738.CD2F1F09@viggo.jf.intel.com>
Date: Wed, 08 Nov 2017 11:47:38 -0800
From: Dave Hansen <dave.hansen@...ux.intel.com>
To: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, dave.hansen@...ux.intel.com,
moritz.lipp@...k.tugraz.at, daniel.gruss@...k.tugraz.at,
michael.schwarz@...k.tugraz.at, richard.fellner@...dent.tugraz.at,
luto@...nel.org, torvalds@...ux-foundation.org,
keescook@...gle.com, hughd@...gle.com, x86@...nel.org
Subject: [PATCH 28/30] x86, kaiser: allow KAISER to be enabled/disabled at runtime
From: Dave Hansen <dave.hansen@...ux.intel.com>
The KAISER CR3 switches are expensive for a number of reasons. Not all
systems benefit from the protection provided by KAISER, and some of
them cannot pay the high performance cost.
This patch adds a debugfs file to control KAISER at runtime. To
disable KAISER:

	echo 0 > /sys/kernel/debug/x86/kaiser-enabled

and to re-enable it:

	echo 1 > /sys/kernel/debug/x86/kaiser-enabled
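The file can also be driven programmatically. A minimal, illustrative
userspace sketch (not part of the patch; it assumes debugfs is mounted
at /sys/kernel/debug and that the caller is privileged enough to write
there):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Write '0' or '1' to the KAISER runtime control file. */
	static int set_kaiser(int enable)
	{
		const char *path = "/sys/kernel/debug/x86/kaiser-enabled";
		char val = enable ? '1' : '0';
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return -1;
		}
		if (write(fd, &val, 1) != 1) {
			perror("write");
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(int argc, char **argv)
	{
		/* "./toggle 0" disables KAISER, "./toggle 1" re-enables it. */
		return set_kaiser(argc > 1 && argv[1][0] == '1') ? 1 : 0;
	}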
This is a *minimal* implementation. There are certainly plenty of
optimizations that could be layered on top of it, for instance by
patching the checks at runtime with ALTERNATIVES.
Disabling KAISER this way does, however, completely remove all the
KAISER-based CR3 writes. So, a paravirtualized system that cannot
tolerate CR3 writes can theoretically survive with CONFIG_KAISER=y, as
long as it runs with /sys/kernel/debug/x86/kaiser-enabled=0.
Signed-off-by: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Moritz Lipp <moritz.lipp@...k.tugraz.at>
Cc: Daniel Gruss <daniel.gruss@...k.tugraz.at>
Cc: Michael Schwarz <michael.schwarz@...k.tugraz.at>
Cc: Richard Fellner <richard.fellner@...dent.tugraz.at>
Cc: Andy Lutomirski <luto@...nel.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Kees Cook <keescook@...gle.com>
Cc: Hugh Dickins <hughd@...gle.com>
Cc: x86@...nel.org
---
b/arch/x86/entry/calling.h | 12 +++++++
b/arch/x86/mm/kaiser.c | 70 ++++++++++++++++++++++++++++++++++++++++++---
2 files changed, 78 insertions(+), 4 deletions(-)
diff -puN arch/x86/entry/calling.h~kaiser-dynamic-asm arch/x86/entry/calling.h
--- a/arch/x86/entry/calling.h~kaiser-dynamic-asm 2017-11-08 10:45:41.361681365 -0800
+++ b/arch/x86/entry/calling.h 2017-11-08 10:45:41.366681365 -0800
@@ -208,19 +208,29 @@ For 32-bit we have the following convent
orq $(KAISER_SWITCH_MASK), \reg
.endm
+.macro JUMP_IF_KAISER_OFF label
+ testq $1, kaiser_asm_do_switch
+ jz \label
+.endm
+
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+ JUMP_IF_KAISER_OFF .Lswitch_done_\@
mov %cr3, \scratch_reg
ADJUST_KERNEL_CR3 \scratch_reg
mov \scratch_reg, %cr3
+.Lswitch_done_\@:
.endm
.macro SWITCH_TO_USER_CR3 scratch_reg:req
+ JUMP_IF_KAISER_OFF .Lswitch_done_\@
mov %cr3, \scratch_reg
ADJUST_USER_CR3 \scratch_reg
mov \scratch_reg, %cr3
+.Lswitch_done_\@:
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
+ JUMP_IF_KAISER_OFF .Ldone_\@
movq %cr3, %r\scratch_reg
movq %r\scratch_reg, \save_reg
/*
@@ -243,11 +253,13 @@ For 32-bit we have the following convent
.endm
.macro RESTORE_CR3 save_reg:req
+ JUMP_IF_KAISER_OFF .Ldone_\@
/*
* We could avoid the CR3 write if not changing its value,
* but that requires a CR3 read *and* a scratch register.
*/
movq \save_reg, %cr3
+.Ldone_\@:
.endm
#else /* CONFIG_KAISER=n: */
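(For readability: in C terms, the test that JUMP_IF_KAISER_OFF places
in front of each CR3 switch is roughly the following. This is an
illustrative analogue, not part of the patch; read_cr3()/write_cr3()
stand in for the raw mov instructions, and the KAISER_SWITCH_MASK
handling follows ADJUST_KERNEL_CR3 from earlier in the series:

	/* C analogue of JUMP_IF_KAISER_OFF + SWITCH_TO_KERNEL_CR3: */
	static inline void switch_to_kernel_cr3_c(void)
	{
		unsigned long cr3;

		/* testq $1, kaiser_asm_do_switch; jz .Lswitch_done */
		if (!(kaiser_asm_do_switch[0] & 1))
			return;			/* KAISER disabled: skip the switch */

		cr3 = read_cr3();		/* mov %cr3, reg */
		cr3 &= ~KAISER_SWITCH_MASK;	/* ADJUST_KERNEL_CR3 */
		write_cr3(cr3);			/* mov reg, %cr3 */
	}

When the flag is clear, the entry/exit paths pay one memory test and a
predictable branch instead of a serializing CR3 read-modify-write.)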
diff -puN arch/x86/mm/kaiser.c~kaiser-dynamic-asm arch/x86/mm/kaiser.c
--- a/arch/x86/mm/kaiser.c~kaiser-dynamic-asm 2017-11-08 10:45:41.363681365 -0800
+++ b/arch/x86/mm/kaiser.c 2017-11-08 10:45:41.367681365 -0800
@@ -31,6 +31,9 @@
#include <asm/tlbflush.h>
#include <asm/desc.h>
+__aligned(PAGE_SIZE)
+unsigned long kaiser_asm_do_switch[PAGE_SIZE/sizeof(unsigned long)] = { 1 };
+
/*
* At runtime, the only things we map are some things for CPU
* hotplug, and stacks for new processes. No two CPUs will ever
@@ -355,6 +358,9 @@ void __init kaiser_init(void)
kaiser_init_all_pgds();
+ kaiser_add_user_map_early(&kaiser_asm_do_switch, PAGE_SIZE,
+ __PAGE_KERNEL | _PAGE_GLOBAL);
+
for_each_possible_cpu(cpu) {
void *percpu_vaddr = __per_cpu_user_mapped_start +
per_cpu_offset(cpu);
@@ -459,6 +465,56 @@ static ssize_t kaiser_enabled_read_file(
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
+enum poison {
+ KAISER_POISON,
+ KAISER_UNPOISON
+};
+void kaiser_poison_pgds(enum poison do_poison);
+
+void kaiser_do_disable(void)
+{
+ /* Make sure the kernel PGDs are usable by userspace: */
+ kaiser_poison_pgds(KAISER_UNPOISON);
+
+ /*
+ * Make sure all the CPUs have the poison clear in their TLBs.
+ * This also functions as a barrier to ensure that everyone
+ * sees the unpoisoned PGDs.
+ */
+ flush_tlb_all();
+
+ /* Tell the assembly code to stop switching CR3. */
+ kaiser_asm_do_switch[0] = 0;
+
+ /*
+ * Make sure everybody does an interrupt. This means that
+ * they have gone through a SWITCH_TO_KERNEL_CR3 and are no
+ * longer running on the userspace CR3. If we did not do
+ * this, we might have CPUs running on the shadow page tables
+ * that then enter the kernel and think they do *not* need to
+ * switch.
+ */
+ flush_tlb_all();
+}
+
+void kaiser_do_enable(void)
+{
+ /* Tell the assembly code to start switching CR3: */
+ kaiser_asm_do_switch[0] = 1;
+
+ /* Make sure everyone can see the kaiser_asm_do_switch update: */
+ synchronize_rcu();
+
+ /*
+ * Now that userspace is no longer using the kernel copy of
+ * the page tables, we can poison it:
+ */
+ kaiser_poison_pgds(KAISER_POISON);
+
+ /* Make sure all the CPUs see the poison: */
+ flush_tlb_all();
+}
+
static ssize_t kaiser_enabled_write_file(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
@@ -480,7 +536,17 @@ static ssize_t kaiser_enabled_write_file
if (kaiser_enabled == enable)
return count;
+ /*
+ * This tells the page table code whether to poison PGDs
+ * allocated from now on.
+ */
WRITE_ONCE(kaiser_enabled, enable);
+ synchronize_rcu();
+
+ if (enable)
+ kaiser_do_enable();
+ else
+ kaiser_do_disable();
+
return count;
}
@@ -498,10 +564,6 @@ static int __init create_kaiser_enabled(
}
late_initcall(create_kaiser_enabled);
-enum poison {
- KAISER_POISON,
- KAISER_UNPOISON
-};
void kaiser_poison_pgd_page(pgd_t *pgd_page, enum poison do_poison)
{
int i = 0;
_
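For reference, the sequencing that kaiser_enabled_write_file() relies
on, condensed into one place (illustrative; kaiser_set_enabled() is a
hypothetical wrapper name, but the calls inside it are the patch's
own):

	static void kaiser_set_enabled(int enable)
	{
		/* Tell the PGD allocation code whether to poison new PGDs. */
		WRITE_ONCE(kaiser_enabled, enable);

		/* Wait for in-flight PGD allocations to observe the flag. */
		synchronize_rcu();

		if (enable)
			kaiser_do_enable();	/* set asm flag, sync, poison + flush */
		else
			kaiser_do_disable();	/* unpoison, flush, clear asm flag, flush */
	}

Note also that kaiser_asm_do_switch is a full page-aligned page rather
than a single word: it is mapped into the user copy of the page tables
via kaiser_add_user_map_early() so the entry code can read it before
switching CR3, and that user mapping cannot be any finer-grained than
a page.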