Message-ID: <tip-296f781a4b7801ad9c1c0219f9e87b6c25e196fe@git.kernel.org>
Date:	Fri, 29 Apr 2016 03:50:43 -0700
From:	tip-bot for Andy Lutomirski <tipbot@...or.com>
To:	linux-tip-commits@...r.kernel.org
Cc:	linux-kernel@...r.kernel.org, bp@...en8.de, hpa@...or.com,
	dvlasenk@...hat.com, tglx@...utronix.de, luto@...nel.org,
	brgerst@...il.com, torvalds@...ux-foundation.org, mingo@...nel.org,
	peterz@...radead.org, luto@...capital.net
Subject: [tip:x86/asm] x86/asm/64: Rename thread_struct's fs and gs to
 fsbase and gsbase

Commit-ID:  296f781a4b7801ad9c1c0219f9e87b6c25e196fe
Gitweb:     http://git.kernel.org/tip/296f781a4b7801ad9c1c0219f9e87b6c25e196fe
Author:     Andy Lutomirski <luto@...nel.org>
AuthorDate: Tue, 26 Apr 2016 12:23:29 -0700
Committer:  Ingo Molnar <mingo@...nel.org>
CommitDate: Fri, 29 Apr 2016 11:56:42 +0200

x86/asm/64: Rename thread_struct's fs and gs to fsbase and gsbase

Unlike ds and es, these are base addresses, not selectors.  Rename
them so their meaning is more obvious.

On x86_32, the field is still called fs.  Fixing that could make sense
as a future cleanup.

Signed-off-by: Andy Lutomirski <luto@...nel.org>
Cc: Andy Lutomirski <luto@...capital.net>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Brian Gerst <brgerst@...il.com>
Cc: Denys Vlasenko <dvlasenk@...hat.com>
Cc: H. Peter Anvin <hpa@...or.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Link: http://lkml.kernel.org/r/69a18a51c4cba0ce29a241e570fc618ad721d908.1461698311.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 arch/x86/include/asm/elf.h       |  6 +++---
 arch/x86/include/asm/processor.h | 11 +++++++++--
 arch/x86/kernel/process_64.c     | 30 +++++++++++++++---------------
 arch/x86/kernel/ptrace.c         |  8 ++++----
 arch/x86/kvm/svm.c               |  2 +-
 5 files changed, 32 insertions(+), 25 deletions(-)

diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 15340e3..fea7724 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -176,7 +176,7 @@ static inline void elf_common_init(struct thread_struct *t,
 	regs->si = regs->di = regs->bp = 0;
 	regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
 	regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
-	t->fs = t->gs = 0;
+	t->fsbase = t->gsbase = 0;
 	t->fsindex = t->gsindex = 0;
 	t->ds = t->es = ds;
 }
@@ -226,8 +226,8 @@ do {								\
 	(pr_reg)[18] = (regs)->flags;				\
 	(pr_reg)[19] = (regs)->sp;				\
 	(pr_reg)[20] = (regs)->ss;				\
-	(pr_reg)[21] = current->thread.fs;			\
-	(pr_reg)[22] = current->thread.gs;			\
+	(pr_reg)[21] = current->thread.fsbase;			\
+	(pr_reg)[22] = current->thread.gsbase;			\
 	asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v;	\
 	asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v;	\
 	asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v;	\
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 9264476..9251aa9 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -388,9 +388,16 @@ struct thread_struct {
 	unsigned long		ip;
 #endif
 #ifdef CONFIG_X86_64
-	unsigned long		fs;
+	unsigned long		fsbase;
+	unsigned long		gsbase;
+#else
+	/*
+	 * XXX: this could presumably be unsigned short.  Alternatively,
+	 * 32-bit kernels could be taught to use fsindex instead.
+	 */
+	unsigned long fs;
+	unsigned long gs;
 #endif
-	unsigned long		gs;
 
 	/* Save middle states of ptrace breakpoints */
 	struct perf_event	*ptrace_bps[HBP_NUM];
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 864fe2c..4285f6a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -150,9 +150,9 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	p->thread.io_bitmap_ptr = NULL;
 
 	savesegment(gs, p->thread.gsindex);
-	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
+	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
 	savesegment(fs, p->thread.fsindex);
-	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
+	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
 	savesegment(es, p->thread.es);
 	savesegment(ds, p->thread.ds);
 	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
@@ -329,18 +329,18 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * stronger guarantees.)
 	 *
 	 * As an invariant,
-	 * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) is
+	 * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
 	 * impossible.
 	 */
 	if (next->fsindex) {
 		/* Loading a nonzero value into FS sets the index and base. */
 		loadsegment(fs, next->fsindex);
 	} else {
-		if (next->fs) {
+		if (next->fsbase) {
 			/* Next index is zero but next base is nonzero. */
 			if (prev_fsindex)
 				loadsegment(fs, 0);
-			wrmsrl(MSR_FS_BASE, next->fs);
+			wrmsrl(MSR_FS_BASE, next->fsbase);
 		} else {
 			/* Next base and index are both zero. */
 			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
@@ -356,7 +356,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 				 * didn't change the base, then the base is
 				 * also zero and we don't need to do anything.
 				 */
-				if (prev->fs || prev_fsindex)
+				if (prev->fsbase || prev_fsindex)
 					loadsegment(fs, 0);
 			}
 		}
@@ -369,18 +369,18 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * us.
 	 */
 	if (prev_fsindex)
-		prev->fs = 0;
+		prev->fsbase = 0;
 	prev->fsindex = prev_fsindex;
 
 	if (next->gsindex) {
 		/* Loading a nonzero value into GS sets the index and base. */
 		load_gs_index(next->gsindex);
 	} else {
-		if (next->gs) {
+		if (next->gsbase) {
 			/* Next index is zero but next base is nonzero. */
 			if (prev_gsindex)
 				load_gs_index(0);
-			wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
+			wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
 		} else {
 			/* Next base and index are both zero. */
 			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
@@ -400,7 +400,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 				 * didn't change the base, then the base is
 				 * also zero and we don't need to do anything.
 				 */
-				if (prev->gs || prev_gsindex)
+				if (prev->gsbase || prev_gsindex)
 					load_gs_index(0);
 			}
 		}
@@ -413,7 +413,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * us.
 	 */
 	if (prev_gsindex)
-		prev->gs = 0;
+		prev->gsbase = 0;
 	prev->gsindex = prev_gsindex;
 
 	switch_fpu_finish(next_fpu, fpu_switch);
@@ -536,7 +536,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 			return -EPERM;
 		cpu = get_cpu();
 		task->thread.gsindex = 0;
-		task->thread.gs = addr;
+		task->thread.gsbase = addr;
 		if (doit) {
 			load_gs_index(0);
 			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
@@ -549,7 +549,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 			return -EPERM;
 		cpu = get_cpu();
 		task->thread.fsindex = 0;
-		task->thread.fs = addr;
+		task->thread.fsbase = addr;
 		if (doit) {
 			/* set the selector to 0 to not confuse __switch_to */
 			loadsegment(fs, 0);
@@ -562,7 +562,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (doit)
 			rdmsrl(MSR_FS_BASE, base);
 		else
-			base = task->thread.fs;
+			base = task->thread.fsbase;
 		ret = put_user(base, (unsigned long __user *)addr);
 		break;
 	}
@@ -571,7 +571,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (doit)
 			rdmsrl(MSR_KERNEL_GS_BASE, base);
 		else
-			base = task->thread.gs;
+			base = task->thread.gsbase;
 		ret = put_user(base, (unsigned long __user *)addr);
 		break;
 	}
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index e72ab40..e60ef91 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -399,7 +399,7 @@ static int putreg(struct task_struct *child,
 		 * to set either thread.fs or thread.fsindex and the
 		 * corresponding GDT slot.
 		 */
-		if (child->thread.fs != value)
+		if (child->thread.fsbase != value)
 			return do_arch_prctl(child, ARCH_SET_FS, value);
 		return 0;
 	case offsetof(struct user_regs_struct,gs_base):
@@ -408,7 +408,7 @@ static int putreg(struct task_struct *child,
 		 */
 		if (value >= TASK_SIZE_OF(child))
 			return -EIO;
-		if (child->thread.gs != value)
+		if (child->thread.gsbase != value)
 			return do_arch_prctl(child, ARCH_SET_GS, value);
 		return 0;
 #endif
@@ -438,14 +438,14 @@ static unsigned long getreg(struct task_struct *task, unsigned long offset)
 		 * XXX: This will not behave as expected if called on
 		 * current or if fsindex != 0.
 		 */
-		return task->thread.fs;
+		return task->thread.fsbase;
 	}
 	case offsetof(struct user_regs_struct, gs_base): {
 		/*
 		 * XXX: This will not behave as expected if called on
 		 * current or if fsindex != 0.
 		 */
-		return task->thread.gs;
+		return task->thread.gsbase;
 	}
 #endif
 	}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 31346a3..fafd720 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1254,7 +1254,7 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_load_ldt(svm->host.ldt);
 #ifdef CONFIG_X86_64
 	loadsegment(fs, svm->host.fs);
-	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
 	load_gs_index(svm->host.gs);
 #else
 #ifdef CONFIG_X86_32_LAZY_GS

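[Not part of the patch - a minimal userspace sketch of the distinction the rename captures: the FS *selector* is whatever sits in the segment register (normally 0 in 64-bit userspace), while the FS *base* lives in MSR_FS_BASE and is what the newly named thread.fsbase tracks. glibc has no wrapper for arch_prctl(), so it is invoked via syscall(); ARCH_GET_FS comes from <asm/prctl.h>.]

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>		/* ARCH_GET_FS */

int main(void)
{
	unsigned int fs_sel;
	unsigned long fs_base = 0;

	/* The selector: read straight out of the FS segment register. */
	asm("movl %%fs,%0" : "=r" (fs_sel));

	/*
	 * The base: held in MSR_FS_BASE and reported by the kernel via
	 * arch_prctl(ARCH_GET_FS) - the value thread.fsbase mirrors.
	 */
	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fs_base))
		perror("arch_prctl(ARCH_GET_FS)");

	printf("FS selector: %#x   FS base: %#lx\n", fs_sel, fs_base);
	return 0;
}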