Date:	Tue,  5 May 2015 18:24:40 +0200
From:	Ingo Molnar <mingo@...nel.org>
To:	linux-kernel@...r.kernel.org
Cc:	Andy Lutomirski <luto@...capital.net>,
	Borislav Petkov <bp@...en8.de>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	Fenghua Yu <fenghua.yu@...el.com>,
	"H. Peter Anvin" <hpa@...or.com>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Oleg Nesterov <oleg@...hat.com>,
	Thomas Gleixner <tglx@...utronix.de>
Subject: [PATCH 060/208] x86/fpu: Use 'struct fpu' in switch_fpu_prepare()

Migrate switch_fpu_prepare() to pure 'struct fpu' usage: pass in the old
and new FPU contexts directly, instead of deriving them from task_struct
pointers inside the function.
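
For reference, the resulting call-site change in __switch_to() (taken
from the process_32.c/process_64.c hunks below):

	/* before: task_struct pointers */
	fpu = switch_fpu_prepare(prev_p, next_p, cpu);

	/* after: the old and new FPU contexts are passed directly */
	fpu = switch_fpu_prepare(&prev_p->thread.fpu, &next_p->thread.fpu, cpu);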

Reviewed-by: Borislav Petkov <bp@...en8.de>
Cc: Andy Lutomirski <luto@...capital.net>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Fenghua Yu <fenghua.yu@...el.com>
Cc: H. Peter Anvin <hpa@...or.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Oleg Nesterov <oleg@...hat.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 arch/x86/include/asm/fpu-internal.h | 27 +++++++++++++--------------
 arch/x86/kernel/process_32.c        |  2 +-
 arch/x86/kernel/process_64.c        |  2 +-
 3 files changed, 15 insertions(+), 16 deletions(-)

diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 579f7d0a399d..60d2c6f376f3 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -402,10 +402,9 @@ static inline void fpu_reset_state(struct fpu *fpu)
  */
 typedef struct { int preload; } fpu_switch_t;
 
-static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
+static inline fpu_switch_t
+switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 {
-	struct fpu *old_fpu = &old->thread.fpu;
-	struct fpu *new_fpu = &new->thread.fpu;
 	fpu_switch_t fpu;
 
 	/*
@@ -413,33 +412,33 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 	 * or if the past 5 consecutive context-switches used math.
 	 */
 	fpu.preload = new_fpu->fpstate_active &&
-		      (use_eager_fpu() || new->thread.fpu.counter > 5);
+		      (use_eager_fpu() || new_fpu->counter > 5);
 
 	if (old_fpu->has_fpu) {
-		if (!fpu_save_init(&old->thread.fpu))
-			old->thread.fpu.last_cpu = -1;
+		if (!fpu_save_init(old_fpu))
+			old_fpu->last_cpu = -1;
 		else
-			old->thread.fpu.last_cpu = cpu;
+			old_fpu->last_cpu = cpu;
 
 		/* But leave fpu_fpregs_owner_ctx! */
-		old->thread.fpu.has_fpu = 0;
+		old_fpu->has_fpu = 0;
 
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
-			new->thread.fpu.counter++;
+			new_fpu->counter++;
 			__thread_set_has_fpu(new_fpu);
-			prefetch(new->thread.fpu.state);
+			prefetch(new_fpu->state);
 		} else if (!use_eager_fpu())
 			stts();
 	} else {
-		old->thread.fpu.counter = 0;
-		old->thread.fpu.last_cpu = -1;
+		old_fpu->counter = 0;
+		old_fpu->last_cpu = -1;
 		if (fpu.preload) {
-			new->thread.fpu.counter++;
+			new_fpu->counter++;
 			if (fpu_want_lazy_restore(new_fpu, cpu))
 				fpu.preload = 0;
 			else
-				prefetch(new->thread.fpu.state);
+				prefetch(new_fpu->state);
 			__thread_fpu_begin(new_fpu);
 		}
 	}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 1a0edce626b2..5b0ed71dde60 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -248,7 +248,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
+	fpu = switch_fpu_prepare(&prev_p->thread.fpu, &next_p->thread.fpu, cpu);
 
 	/*
 	 * Save away %gs. No need to save %fs, as it was saved on the
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 99cc4b8589ad..fefe65efd9d6 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -278,7 +278,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	unsigned fsindex, gsindex;
 	fpu_switch_t fpu;
 
-	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
+	fpu = switch_fpu_prepare(&prev_p->thread.fpu, &next_p->thread.fpu, cpu);
 
 	/* We must save %fs and %gs before load_TLS() because
 	 * %fs and %gs may be cleared by load_TLS().
-- 
2.1.0
