Message-Id: <1529536506-26237-3-git-send-email-chang.seok.bae@intel.com>
Date: Wed, 20 Jun 2018 16:15:01 -0700
From: "Chang S. Bae" <chang.seok.bae@...el.com>
To: Andy Lutomirski <luto@...nel.org>,
"H . Peter Anvin" <hpa@...or.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...nel.org>
Cc: Andi Kleen <ak@...ux.intel.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Markus T Metzger <markus.t.metzger@...el.com>,
Ravi Shankar <ravi.v.shankar@...el.com>,
"Chang S . Bae" <chang.seok.bae@...el.com>,
LKML <linux-kernel@...r.kernel.org>
Subject: [PATCH v4 2/7] x86/fsgsbase/64: Make ptrace read FS/GS base accurately
From: Andy Lutomirski <luto@...nel.org>
ptrace can read the FS/GS base using the register access API
(PTRACE_PEEKUSER, etc.) or PTRACE_ARCH_PRCTL. Make both of these
mechanisms return the actual FS/GS base.
This will improve debuggability by providing the correct information
to the ptracer (e.g. GDB).
Signed-off-by: Andy Lutomirski <luto@...nel.org>
[chang: Rebase and revise patch description]
Signed-off-by: Chang S. Bae <chang.seok.bae@...el.com>
Reviewed-by: Andi Kleen <ak@...ux.intel.com>
Cc: H. Peter Anvin <hpa@...or.com>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...nel.org>
---
arch/x86/kernel/process_64.c | 67 +++++++++++++++++++++++++++++++++-----------
1 file changed, 51 insertions(+), 16 deletions(-)
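
Not part of the patch (below the cut, so git am drops it): a rough,
untested sketch of how a ptracer could read the tracee's FS base
through the two interfaces mentioned above, i.e. the register-access
path and PTRACE_ARCH_PRCTL. The dump_fsbase() helper is made up for
illustration and assumes the tracee is already attached and stopped;
GS would be analogous via gs_base/ARCH_GET_GS.

  #include <errno.h>
  #include <stddef.h>
  #include <stdio.h>
  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <sys/user.h>
  #include <asm/prctl.h>                /* ARCH_GET_FS */

  #ifndef PTRACE_ARCH_PRCTL
  # define PTRACE_ARCH_PRCTL 30         /* x86-64 only */
  #endif

  static void dump_fsbase(pid_t pid)
  {
          unsigned long peeked, prctled = 0;

          /*
           * Register-access path: fs_base lives in struct
           * user_regs_struct, which sits at the start of the user area.
           */
          errno = 0;
          peeked = ptrace(PTRACE_PEEKUSER, pid,
                          offsetof(struct user_regs_struct, fs_base), 0);
          if (errno)
                  perror("PTRACE_PEEKUSER");

          /* PTRACE_ARCH_PRCTL path: the same value via ARCH_GET_FS. */
          if (ptrace(PTRACE_ARCH_PRCTL, pid, &prctled, ARCH_GET_FS))
                  perror("PTRACE_ARCH_PRCTL");

          printf("fs_base: PEEKUSER=%#lx ARCH_PRCTL=%#lx\n",
                 peeked, prctled);
  }

With this patch both reads should report the base the tracee actually
sees, including when a nonzero FS selector refers to a TLS or LDT
descriptor.
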
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ace0158..e498671 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -279,6 +279,49 @@ static __always_inline void load_seg_legacy(unsigned short prev_index,
}
}
+static unsigned long task_seg_base(struct task_struct *task,
+ unsigned short selector)
+{
+ unsigned short idx = selector >> 3;
+ unsigned long base;
+
+ if (likely((selector & SEGMENT_TI_MASK) == 0)) {
+ if (unlikely(idx >= GDT_ENTRIES))
+ return 0;
+
+ /*
+ * There are no user segments in the GDT with nonzero bases
+ * other than the TLS segments.
+ */
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return 0;
+
+ idx -= GDT_ENTRY_TLS_MIN;
+ base = get_desc_base(&task->thread.tls_array[idx]);
+ } else {
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ struct ldt_struct *ldt;
+
+ /*
+ * If performance here mattered, we could protect the LDT
+ * with RCU. This is a slow path, though, so we can just
+ * take the mutex.
+ */
+ mutex_lock(&task->mm->context.lock);
+ ldt = task->mm->context.ldt;
+ if (unlikely(!ldt || idx >= ldt->nr_entries))
+ base = 0;
+ else
+ base = get_desc_base(ldt->entries + idx);
+ mutex_unlock(&task->mm->context.lock);
+#else
+ base = 0;
+#endif
+ }
+
+ return base;
+}
+
void write_fsbase(unsigned long fsbase)
{
/* set the selector to 0 to not confuse __switch_to */
@@ -297,16 +340,12 @@ unsigned long read_task_fsbase(struct task_struct *task)
{
unsigned long fsbase;
- if (task == current) {
+ if (task == current)
fsbase = read_fsbase();
- } else {
- /*
- * XXX: This will not behave as expected if called
- * if fsindex != 0. This preserves an existing bug
- * that will be fixed.
- */
+ else if (task->thread.fsindex == 0)
fsbase = task->thread.fsbase;
- }
+ else
+ fsbase = task_seg_base(task, task->thread.fsindex);
return fsbase;
}
@@ -315,16 +354,12 @@ unsigned long read_task_gsbase(struct task_struct *task)
{
unsigned long gsbase;
- if (task == current) {
+ if (task == current)
gsbase = read_inactive_gsbase();
- } else {
- /*
- * XXX: This will not behave as expected if called
- * if gsindex != 0. Same bug preservation as above
- * read_task_fsbase.
- */
+ else if (task->thread.gsindex == 0)
gsbase = task->thread.gsbase;
- }
+ else
+ gsbase = task_seg_base(task, task->thread.gsindex);
return gsbase;
}
--
2.7.4