Message-ID: <tip-45e876f794e8e566bf827c25ef0791875081724f@git.kernel.org>
Date: Fri, 29 Apr 2016 03:49:29 -0700
From: tip-bot for Andy Lutomirski <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, mingo@...nel.org,
	peterz@...radead.org, brgerst@...il.com,
	torvalds@...ux-foundation.org, dvlasenk@...hat.com,
	luto@...capital.net, tglx@...utronix.de, hpa@...or.com,
	bp@...en8.de, luto@...nel.org
Subject: [tip:x86/asm] x86/segments/64: When loadsegment(fs, ...) fails,
	clear the base

Commit-ID: 45e876f794e8e566bf827c25ef0791875081724f
Gitweb: http://git.kernel.org/tip/45e876f794e8e566bf827c25ef0791875081724f
Author: Andy Lutomirski <luto@...nel.org>
AuthorDate: Tue, 26 Apr 2016 12:23:26 -0700
Committer: Ingo Molnar <mingo@...nel.org>
CommitDate: Fri, 29 Apr 2016 11:56:41 +0200

x86/segments/64: When loadsegment(fs, ...) fails, clear the base

On AMD CPUs, a failed loadsegment currently may not clear the FS
base. Fix it.

While we're at it, prevent loadsegment(gs, xyz) from even compiling
on 64-bit kernels. It shouldn't be used.
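
For illustration, the enforcement is a plain token-paste. Reduced to a
standalone sketch (the stub name and selector value are made up for the
example; this is not the kernel code):

/* loadsegment(seg, v) expands to __loadsegment_<seg>(v), so using a
 * segment that has no helper is a build failure, not a runtime surprise. */
static void load_fs_stub(unsigned short value) { (void)value; }

#define __loadsegment_fs(value) load_fs_stub(value)
/* no __loadsegment_gs on 64-bit: load_gs_index() must be used instead */
#define loadsegment(seg, value) __loadsegment_ ## seg (value)

void ok(void) { loadsegment(fs, 0x2b); } /* expands to load_fs_stub(0x2b) */
/* void bad(void) { loadsegment(gs, 0); }  build failure: __loadsegment_gs */
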
Signed-off-by: Andy Lutomirski <luto@...nel.org>
Cc: Andy Lutomirski <luto@...capital.net>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Brian Gerst <brgerst@...il.com>
Cc: Denys Vlasenko <dvlasenk@...hat.com>
Cc: H. Peter Anvin <hpa@...or.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Link: http://lkml.kernel.org/r/a084c1b93b7b1408b58d3fd0b5d6e47da8e7d7cf.1461698311.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 arch/x86/include/asm/segment.h | 42 +++++++++++++++++++++++++++++++++++++++---
 arch/x86/kernel/cpu/common.c   |  2 +-
 arch/x86/mm/extable.c          | 10 ++++++++++
 3 files changed, 50 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 7d5a192..e1a4afd 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_SEGMENT_H
 
 #include <linux/const.h>
+#include <asm/alternative.h>
 
 /*
  * Constructor for a conventional segment GDT (or LDT) entry.
@@ -249,10 +250,13 @@ extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDL
 #endif
 
 /*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
+ * Load a segment. Fall back on loading the zero segment if something goes
+ * wrong. This variant assumes that loading zero fully clears the segment.
+ * This is always the case on Intel CPUs and, even on 64-bit AMD CPUs, any
+ * failure to fully clear the cached descriptor is only observable for
+ * FS and GS.
  */
-#define loadsegment(seg, value)					\
+#define __loadsegment_simple(seg, value)			\
 do {								\
 	unsigned short __val = (value);				\
 								\
@@ -269,6 +273,38 @@ do {						\
 		     : "+r" (__val) : : "memory");	\
 } while (0)
 
+#define __loadsegment_ss(value) __loadsegment_simple(ss, (value))
+#define __loadsegment_ds(value) __loadsegment_simple(ds, (value))
+#define __loadsegment_es(value) __loadsegment_simple(es, (value))
+
+#ifdef CONFIG_X86_32
+
+/*
+ * On 32-bit systems, the hidden parts of FS and GS are unobservable if
+ * the selector is NULL, so there's no funny business here.
+ */
+#define __loadsegment_fs(value) __loadsegment_simple(fs, (value))
+#define __loadsegment_gs(value) __loadsegment_simple(gs, (value))
+
+#else
+
+static inline void __loadsegment_fs(unsigned short value)
+{
+	asm volatile("				\n"
+		     "1:	movw %0, %%fs	\n"
+		     "2:			\n"
+
+		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_clear_fs)
+
+		     : : "rm" (value) : "memory");
+}
+
+/* __loadsegment_gs is intentionally undefined. Use load_gs_index instead. */
+
+#endif
+
+#define loadsegment(seg, value) __loadsegment_ ## seg (value)
+
 /*
  * Save a segment register away:
  */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6bfa36d..0881061 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -430,7 +430,7 @@ void load_percpu_segment(int cpu)
 #ifdef CONFIG_X86_32
 	loadsegment(fs, __KERNEL_PERCPU);
 #else
-	loadsegment(gs, 0);
+	__loadsegment_simple(gs, 0);
 	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 #endif
 	load_stack_canary_segment();
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index aaeda3f..4bb53b8 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -70,6 +70,16 @@ bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
 }
 EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
 
+bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
+			 struct pt_regs *regs, int trapnr)
+{
+	if (static_cpu_has(X86_BUG_NULL_SEG))
+		asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
+	asm volatile ("mov %0, %%fs" : : "rm" (0));
+	return ex_handler_default(fixup, regs, trapnr);
+}
+EXPORT_SYMBOL(ex_handler_clear_fs);
+
 bool ex_has_fault_handler(unsigned long ip)
 {
 	const struct exception_table_entry *e;
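
The __USER_DS step in ex_handler_clear_fs() is the part that matters on
AMD: on CPUs with X86_BUG_NULL_SEG, loading a NULL selector into %fs
leaves the cached base behind, so the handler first loads a known-good
selector to replace the cached descriptor and only then loads NULL.

The user-visible effect of a failed FS reload can be probed with
something along these lines (a rough sketch in the spirit of the x86
selftests, not the actual test: it assumes x86-64 Linux, the set_ldt()
helper is local to the sketch, and reading the base back through
ARCH_GET_FS is itself only an approximation of what the hardware holds):

#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/ldt.h>
#include <asm/prctl.h>

static int set_ldt(const struct user_desc *d)
{
	return (int)syscall(SYS_modify_ldt, 1, d, sizeof(*d));
}

int main(void)
{
	static char seg[4096];
	struct user_desc desc;
	unsigned long base = -1UL;
	unsigned short sel = (0 << 3) | 4 | 3;	/* LDT entry 0, RPL 3 */
	int i;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = 0;
	desc.base_addr = (unsigned int)(unsigned long)seg;
	desc.limit = sizeof(seg) - 1;
	desc.seg_32bit = 1;
	if (set_ldt(&desc))
		return 1;

	/* Point %fs at the LDT segment... */
	asm volatile("mov %0, %%fs" : : "rm" (sel));

	/* ...then yank the descriptor out from under it. */
	desc.seg_not_present = 1;
	set_ldt(&desc);

	/* Context switches make the kernel reload %fs; the reload
	 * faults and takes the ex_handler_clear_fs() fixup above. */
	for (i = 0; i < 100; i++)
		sched_yield();

	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);
	printf("FS base after failed reload: %#lx (want 0)\n", base);
	return 0;
}

With the patch applied, the reported base should be 0 even on CPUs with
the null-segment quirk; before it, a stale base could survive the failed
load.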