lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Tue,  5 May 2015 18:24:25 +0200
From:	Ingo Molnar <mingo@...nel.org>
To:	linux-kernel@...r.kernel.org
Cc:	Andy Lutomirski <luto@...capital.net>,
	Borislav Petkov <bp@...en8.de>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	Fenghua Yu <fenghua.yu@...el.com>,
	"H. Peter Anvin" <hpa@...or.com>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Oleg Nesterov <oleg@...hat.com>,
	Thomas Gleixner <tglx@...utronix.de>
Subject: [PATCH 045/208] x86/fpu: Eliminate the __thread_has_fpu() wrapper

Start migrating FPU methods towards using 'struct fpu *fpu'
directly. __thread_has_fpu() is just a trivial wrapper around
fpu->has_fpu; eliminate it.

Reviewed-by: Borislav Petkov <bp@...en8.de>
Cc: Andy Lutomirski <luto@...capital.net>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Fenghua Yu <fenghua.yu@...el.com>
Cc: H. Peter Anvin <hpa@...or.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Oleg Nesterov <oleg@...hat.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 arch/x86/include/asm/fpu-internal.h | 16 ++++------------
 arch/x86/kernel/fpu/core.c          | 17 ++++++++++-------
 2 files changed, 14 insertions(+), 19 deletions(-)

diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index e180fb96dd0d..c005d1fc1247 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -323,16 +323,6 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
 	return fpu_restore_checking(&tsk->thread.fpu);
 }
 
-/*
- * Software FPU state helpers. Careful: these need to
- * be preemption protection *and* they need to be
- * properly paired with the CR0.TS changes!
- */
-static inline int __thread_has_fpu(struct task_struct *tsk)
-{
-	return tsk->thread.fpu.has_fpu;
-}
-
 /* Must be paired with an 'stts' after! */
 static inline void __thread_clear_has_fpu(struct task_struct *tsk)
 {
@@ -370,13 +360,14 @@ static inline void __thread_fpu_begin(struct task_struct *tsk)
 
 static inline void drop_fpu(struct task_struct *tsk)
 {
+	struct fpu *fpu = &tsk->thread.fpu;
 	/*
 	 * Forget coprocessor state..
 	 */
 	preempt_disable();
 	tsk->thread.fpu.counter = 0;
 
-	if (__thread_has_fpu(tsk)) {
+	if (fpu->has_fpu) {
 		/* Ignore delayed exceptions from user space */
 		asm volatile("1: fwait\n"
 			     "2:\n"
@@ -424,6 +415,7 @@ typedef struct { int preload; } fpu_switch_t;
 
 static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
 {
+	struct fpu *old_fpu = &old->thread.fpu;
 	fpu_switch_t fpu;
 
 	/*
@@ -433,7 +425,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 	fpu.preload = tsk_used_math(new) &&
 		      (use_eager_fpu() || new->thread.fpu.counter > 5);
 
-	if (__thread_has_fpu(old)) {
+	if (old_fpu->has_fpu) {
 		if (!fpu_save_init(&old->thread.fpu))
 			task_disable_lazy_fpu_restore(old);
 		else
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 7711539bcda5..c0633ca8fece 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -57,8 +57,7 @@ static bool interrupted_kernel_fpu_idle(void)
 	if (use_eager_fpu())
 		return true;
 
-	return !__thread_has_fpu(current) &&
-		(read_cr0() & X86_CR0_TS);
+	return !current->thread.fpu.has_fpu && (read_cr0() & X86_CR0_TS);
 }
 
 /*
@@ -93,11 +92,12 @@ EXPORT_SYMBOL(irq_fpu_usable);
 void __kernel_fpu_begin(void)
 {
 	struct task_struct *me = current;
+	struct fpu *fpu = &me->thread.fpu;
 
 	kernel_fpu_disable();
 
-	if (__thread_has_fpu(me)) {
-		fpu_save_init(&me->thread.fpu);
+	if (fpu->has_fpu) {
+		fpu_save_init(fpu);
 	} else {
 		this_cpu_write(fpu_owner_task, NULL);
 		if (!use_eager_fpu())
@@ -109,8 +109,9 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
 void __kernel_fpu_end(void)
 {
 	struct task_struct *me = current;
+	struct fpu *fpu = &me->thread.fpu;
 
-	if (__thread_has_fpu(me)) {
+	if (fpu->has_fpu) {
 		if (WARN_ON(restore_fpu_checking(me)))
 			fpu_reset_state(me);
 	} else if (!use_eager_fpu()) {
@@ -128,14 +129,16 @@ EXPORT_SYMBOL(__kernel_fpu_end);
  */
 void fpu__save(struct task_struct *tsk)
 {
+	struct fpu *fpu = &tsk->thread.fpu;
+
 	WARN_ON(tsk != current);
 
 	preempt_disable();
-	if (__thread_has_fpu(tsk)) {
+	if (fpu->has_fpu) {
 		if (use_eager_fpu()) {
 			__save_fpu(tsk);
 		} else {
-			fpu_save_init(&tsk->thread.fpu);
+			fpu_save_init(fpu);
 			__thread_fpu_end(tsk);
 		}
 	}
-- 
2.1.0

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ