Message-Id: <1537312139-5580-3-git-send-email-chang.seok.bae@intel.com>
Date: Tue, 18 Sep 2018 16:08:53 -0700
From: "Chang S. Bae" <chang.seok.bae@...el.com>
To: Ingo Molnar <mingo@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Andy Lutomirski <luto@...nel.org>,
"H . Peter Anvin" <hpa@...or.com>
Cc: Andi Kleen <ak@...ux.intel.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Markus T Metzger <markus.t.metzger@...el.com>,
Ravi Shankar <ravi.v.shankar@...el.com>,
"Chang S . Bae" <chang.seok.bae@...el.com>,
LKML <linux-kernel@...r.kernel.org>
Subject: [PATCH v6 2/8] x86/fsgsbase/64: Introduce FS/GS base helper functions
With the new helpers, all FS/GS base accesses are centralized.
Eventually, once the FSGSBASE instructions are enabled, the base
accesses will also become faster.

The "inactive" GS base refers to the GS base that is saved at kernel
entry and belongs to the inactive (user) task while the kernel runs.

task_seg_base() is renamed to x86_fsgsbase_read_task() and moved from
kernel/ptrace.c to kernel/process_64.c, where the other helper
functions are implemented, since they are closely coupled. Once the
next patch converts ptrace to use the helpers, the function will no
longer be called directly from ptrace.
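As an illustration of the intended call pattern (a hypothetical
sketch, not part of this patch; dump_fsgs_base() is made up for this
example), a caller such as ptrace would read a stopped task's bases
through the helpers instead of open-coding the descriptor walk:

	#include <asm/fsgsbase.h>

	/* Hypothetical debugging aid: report a stopped task's bases. */
	static void dump_fsgs_base(struct task_struct *task)
	{
		unsigned long fsbase = x86_fsbase_read_task(task);
		unsigned long gsbase = x86_gsbase_read_task(task);

		pr_info("%s[%d]: fsbase=0x%lx gsbase=0x%lx\n",
			task->comm, task_pid_nr(task), fsbase, gsbase);
	}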
Based-on-code-from: Andy Lutomirski <luto@...nel.org>
Suggested-by: Ingo Molnar <mingo@...nel.org>
Signed-off-by: Chang S. Bae <chang.seok.bae@...el.com>
Cc: H. Peter Anvin <hpa@...or.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Andi Kleen <ak@...ux.intel.com>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
---
arch/x86/include/asm/fsgsbase.h | 50 ++++++++++++++++
arch/x86/kernel/process_64.c | 124 ++++++++++++++++++++++++++++++++++++++++
arch/x86/kernel/ptrace.c | 51 ++---------------
3 files changed, 179 insertions(+), 46 deletions(-)
create mode 100644 arch/x86/include/asm/fsgsbase.h
diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h
new file mode 100644
index 0000000..0404dab
--- /dev/null
+++ b/arch/x86/include/asm/fsgsbase.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_FSGSBASE_H
+#define _ASM_FSGSBASE_H 1
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_X86_64
+
+#include <asm/msr-index.h>
+
+unsigned long x86_fsgsbase_read_task(struct task_struct *task,
+ unsigned short selector);
+
+/*
+ * Read/write a task's fsbase or gsbase. This returns the value that
+ * the FS/GS base would have (if the task were to be resumed). These
+ * work on current or on a different non-running task.
+ */
+unsigned long x86_fsbase_read_task(struct task_struct *task);
+unsigned long x86_gsbase_read_task(struct task_struct *task);
+int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
+int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
+
+/* Helper functions for reading/writing FS/GS base */
+
+static inline unsigned long x86_fsbase_read_cpu(void)
+{
+ unsigned long fsbase;
+
+ rdmsrl(MSR_FS_BASE, fsbase);
+ return fsbase;
+}
+
+void x86_fsbase_write_cpu(unsigned long fsbase);
+
+static inline unsigned long x86_gsbase_read_cpu_inactive(void)
+{
+ unsigned long gsbase;
+
+ rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
+ return gsbase;
+}
+
+void x86_gsbase_write_cpu_inactive(unsigned long gsbase);
+
+#endif /* CONFIG_X86_64 */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_FSGSBASE_H */
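A minimal sketch of how the CPU-level helpers pair up (hypothetical
example; dump_cpu_bases() is made up for illustration and assumes a
64-bit kernel context):

	#include <asm/fsgsbase.h>

	static void dump_cpu_bases(void)
	{
		/* FS base of the current task, read from MSR_FS_BASE. */
		unsigned long fsbase = x86_fsbase_read_cpu();

		/*
		 * While the kernel runs, the user ("inactive") GS base
		 * sits in MSR_KERNEL_GS_BASE; SWAPGS exchanges it with
		 * the active GS base at kernel entry and exit.
		 */
		unsigned long gsbase = x86_gsbase_read_cpu_inactive();

		pr_info("fsbase=0x%lx inactive gsbase=0x%lx\n",
			fsbase, gsbase);
	}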
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 1e6abfa..52af8c1 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -54,6 +54,7 @@
#include <asm/vdso.h>
#include <asm/intel_rdt_sched.h>
#include <asm/unistd.h>
+#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
@@ -284,6 +285,129 @@ static __always_inline void load_seg_legacy(unsigned short prev_index,
}
}
+unsigned long x86_fsgsbase_read_task(struct task_struct *task,
+ unsigned short selector)
+{
+ unsigned short idx = selector >> 3;
+ unsigned long base;
+
+ if (likely((selector & SEGMENT_TI_MASK) == 0)) {
+ if (unlikely(idx >= GDT_ENTRIES))
+ return 0;
+
+ /*
+ * There are no user segments in the GDT with nonzero bases
+ * other than the TLS segments.
+ */
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return 0;
+
+ idx -= GDT_ENTRY_TLS_MIN;
+ base = get_desc_base(&task->thread.tls_array[idx]);
+ } else {
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ struct ldt_struct *ldt;
+
+ /*
+ * If performance here mattered, we could protect the LDT
+ * with RCU. This is a slow path, though, so we can just
+ * take the mutex.
+ */
+ mutex_lock(&task->mm->context.lock);
+ ldt = task->mm->context.ldt;
+ if (unlikely(!ldt || idx >= ldt->nr_entries))
+ base = 0;
+ else
+ base = get_desc_base(ldt->entries + idx);
+ mutex_unlock(&task->mm->context.lock);
+#else
+ base = 0;
+#endif
+ }
+
+ return base;
+}
+
+void x86_fsbase_write_cpu(unsigned long fsbase)
+{
+ /*
+ * Set the selector to 0 to mark that the segment base has been
+ * overwritten; the context switch code checks for this and skips
+ * the segment load.
+ */
+ loadseg(FS, 0);
+ wrmsrl(MSR_FS_BASE, fsbase);
+}
+
+void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
+{
+ /* Set the selector to 0 for the same reason as %fs above. */
+ loadseg(GS, 0);
+ wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
+}
+
+unsigned long x86_fsbase_read_task(struct task_struct *task)
+{
+ unsigned long fsbase;
+
+ if (task == current)
+ fsbase = x86_fsbase_read_cpu();
+ else if (task->thread.fsindex == 0)
+ fsbase = task->thread.fsbase;
+ else
+ fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);
+
+ return fsbase;
+}
+
+unsigned long x86_gsbase_read_task(struct task_struct *task)
+{
+ unsigned long gsbase;
+
+ if (task == current)
+ gsbase = x86_gsbase_read_cpu_inactive();
+ else if (task->thread.gsindex == 0)
+ gsbase = task->thread.gsbase;
+ else
+ gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);
+
+ return gsbase;
+}
+
+int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
+{
+ /*
+ * Not strictly needed for %fs, but do it for symmetry
+ * with %gs
+ */
+ if (unlikely(fsbase >= TASK_SIZE_MAX))
+ return -EPERM;
+
+ preempt_disable();
+ task->thread.fsbase = fsbase;
+ if (task == current)
+ x86_fsbase_write_cpu(fsbase);
+ task->thread.fsindex = 0;
+ preempt_enable();
+
+ return 0;
+}
+
+int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
+{
+ if (unlikely(gsbase >= TASK_SIZE_MAX))
+ return -EPERM;
+
+ preempt_disable();
+ task->thread.gsbase = gsbase;
+ if (task == current)
+ x86_gsbase_write_cpu_inactive(gsbase);
+ task->thread.gsindex = 0;
+ preempt_enable();
+
+ return 0;
+}
+
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
unsigned long arg, struct task_struct *p, unsigned long tls)
{
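On the write side, the task variants validate the requested base and
keep the saved base, the selector and (for current) the MSR
consistent. A hedged sketch of a caller (hypothetical; the real
conversions come in later patches of this series):

	/* Hypothetical helper: move a stopped tracee's FS base. */
	static int set_tracee_fsbase(struct task_struct *child,
				     unsigned long new_fsbase)
	{
		/* Returns -EPERM if new_fsbase is above TASK_SIZE_MAX. */
		return x86_fsbase_write_task(child, new_fsbase);
	}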
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 3acbf45..fbde2a7 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -39,7 +39,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/syscall.h>
-#include <asm/mmu_context.h>
+#include <asm/fsgsbase.h>
#include "tls.h"
@@ -343,49 +343,6 @@ static int set_segment_reg(struct task_struct *task,
return 0;
}
-static unsigned long task_seg_base(struct task_struct *task,
- unsigned short selector)
-{
- unsigned short idx = selector >> 3;
- unsigned long base;
-
- if (likely((selector & SEGMENT_TI_MASK) == 0)) {
- if (unlikely(idx >= GDT_ENTRIES))
- return 0;
-
- /*
- * There are no user segments in the GDT with nonzero bases
- * other than the TLS segments.
- */
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return 0;
-
- idx -= GDT_ENTRY_TLS_MIN;
- base = get_desc_base(&task->thread.tls_array[idx]);
- } else {
-#ifdef CONFIG_MODIFY_LDT_SYSCALL
- struct ldt_struct *ldt;
-
- /*
- * If performance here mattered, we could protect the LDT
- * with RCU. This is a slow path, though, so we can just
- * take the mutex.
- */
- mutex_lock(&task->mm->context.lock);
- ldt = task->mm->context.ldt;
- if (unlikely(idx >= ldt->nr_entries))
- base = 0;
- else
- base = get_desc_base(ldt->entries + idx);
- mutex_unlock(&task->mm->context.lock);
-#else
- base = 0;
-#endif
- }
-
- return base;
-}
-
#endif /* CONFIG_X86_32 */
static unsigned long get_flags(struct task_struct *task)
@@ -482,13 +439,15 @@ static unsigned long getreg(struct task_struct *task, unsigned long offset)
if (task->thread.fsindex == 0)
return task->thread.fsbase;
else
- return task_seg_base(task, task->thread.fsindex);
+ return x86_fsgsbase_read_task(task,
+ task->thread.fsindex);
}
case offsetof(struct user_regs_struct, gs_base): {
if (task->thread.gsindex == 0)
return task->thread.gsbase;
else
- return task_seg_base(task, task->thread.gsindex);
+ return x86_fsgsbase_read_task(task,
+ task->thread.gsindex);
}
#endif
}
--
2.7.4