Message-Id: <1475353895-22175-2-git-send-email-riel@redhat.com>
Date: Sat, 1 Oct 2016 16:31:31 -0400
From: riel@...hat.com
To: linux-kernel@...r.kernel.org
Cc: x86@...nel.org, tglx@...utronix.de, pbonzini@...hat.com,
mingo@...hat.com, luto@...nel.org, hpa@...or.com,
dave.hansen@...ux.intel.com, bp@...e.de
Subject: [PATCH RFC 1/5] x86,fpu: split prev/next task fpu state handling
From: Rik van Riel <riel@...hat.com>
Move all handling of the next task's FPU state into
switch_fpu_finish, in preparation for switching FPU
states more lazily.
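To illustrate the split, here is a minimal plain-C sketch of the
two-stage protocol; the struct and the printouts below are simplified
stand-ins for illustration only, not the kernel's actual types:

	#include <stdio.h>

	struct fpu { int fpstate_active; };

	/* Stage 1: runs while the old task is still current; only saves. */
	static void switch_fpu_prepare(struct fpu *old_fpu)
	{
		if (old_fpu->fpstate_active)
			printf("save old task's FPU registers\n");
	}

	/* Stage 2: runs once the new task is current; restores registers
	 * and, after this patch, also decides what to do with CR0.TS. */
	static void switch_fpu_finish(struct fpu *new_fpu)
	{
		if (new_fpu->fpstate_active)
			printf("restore new task's FPU registers\n");
	}

	int main(void)
	{
		struct fpu prev = { .fpstate_active = 1 };
		struct fpu next = { .fpstate_active = 1 };

		switch_fpu_prepare(&prev);	/* old task context */
		/* ... the rest of __switch_to() runs here ... */
		switch_fpu_finish(&next);	/* new task context */
		return 0;
	}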
The CR0.TS state is mirrored in a per-cpu variable instead
of being passed around in a local variable, because passing
it locally will no longer be possible later in the series.
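A minimal sketch of the mirroring idea, again in plain C; the helpers
below are hypothetical stand-ins for the real per-cpu accessors and the
__fpregs_activate_hw()/__fpregs_deactivate_hw() helpers, not the kernel
code itself:

	#include <stdbool.h>
	#include <stdio.h>

	static bool fpu_active;		/* per-cpu mirror of !CR0.TS */

	/* Stand-ins for the hardware helpers; CR0 writes are expensive. */
	static void fpregs_activate_hw(void)   { printf("CLTS (CR0 write)\n"); }
	static void fpregs_deactivate_hw(void) { printf("set CR0.TS (CR0 write)\n"); }

	/* switch_fpu_finish()-style decision: touch CR0 only on transitions. */
	static void update_ts(bool preload)
	{
		if (preload) {
			if (!fpu_active) {
				fpregs_activate_hw();
				fpu_active = true;
			}
		} else if (fpu_active) {
			fpu_active = false;
			fpregs_deactivate_hw();
		}
	}

	int main(void)
	{
		update_ts(true);	/* math task in: one CLTS */
		update_ts(true);	/* another math task: no CR0 write */
		update_ts(false);	/* non-math task: one CR0.TS set */
		return 0;
	}

Because the mirror lives in per-cpu data rather than a local variable,
later patches can consult it outside the context switch path.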
Signed-off-by: Rik van Riel <riel@...hat.com>
---
arch/x86/include/asm/fpu/internal.h | 78 ++++++++++++++++---------------------
arch/x86/kernel/fpu/core.c | 1 +
arch/x86/kernel/process_32.c | 5 +--
arch/x86/kernel/process_64.c | 5 +--
4 files changed, 39 insertions(+), 50 deletions(-)
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 2737366ea583..79e1cee9f3b0 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -482,6 +482,7 @@ extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size)
*/
DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
+DECLARE_PER_CPU(bool, fpu_active);
/*
* Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx,
@@ -577,28 +578,15 @@ static inline void fpregs_deactivate(struct fpu *fpu)
*
* This is a two-stage process:
*
- * - switch_fpu_prepare() saves the old state and
- * sets the new state of the CR0.TS bit. This is
- * done within the context of the old process.
+ * - switch_fpu_prepare() saves the old state.
+ * This is done within the context of the old process.
*
- * - switch_fpu_finish() restores the new state as
- * necessary.
+ * - switch_fpu_finish() restores the new state
+ * and flips CR0.TS as necessary.
*/
-typedef struct { int preload; } fpu_switch_t;
-
-static inline fpu_switch_t
-switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+static inline void
+switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
- fpu_switch_t fpu;
-
- /*
- * If the task has used the math, pre-load the FPU on xsave processors
- * or if the past 5 consecutive context-switches used math.
- */
- fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
- new_fpu->fpstate_active &&
- (use_eager_fpu() || new_fpu->counter > 5);
-
if (old_fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
@@ -608,29 +596,10 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
/* But leave fpu_fpregs_owner_ctx! */
old_fpu->fpregs_active = 0;
trace_x86_fpu_regs_deactivated(old_fpu);
-
- /* Don't change CR0.TS if we just switch! */
- if (fpu.preload) {
- new_fpu->counter++;
- __fpregs_activate(new_fpu);
- trace_x86_fpu_regs_activated(new_fpu);
- prefetch(&new_fpu->state);
- } else {
- __fpregs_deactivate_hw();
- }
} else {
old_fpu->counter = 0;
old_fpu->last_cpu = -1;
- if (fpu.preload) {
- new_fpu->counter++;
- if (fpu_want_lazy_restore(new_fpu, cpu))
- fpu.preload = 0;
- else
- prefetch(&new_fpu->state);
- fpregs_activate(new_fpu);
- }
}
- return fpu;
}
/*
@@ -638,15 +607,36 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
*/
/*
- * By the time this gets called, we've already cleared CR0.TS and
- * given the process the FPU if we are going to preload the FPU
- * state - all we need to do is to conditionally restore the register
- * state itself.
+ * Set up the userspace FPU context for the new task.
*/
-static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
+static inline void switch_fpu_finish(struct fpu *new_fpu)
{
- if (fpu_switch.preload)
+ bool preload;
+ /*
+ * If the task has used the math, pre-load the FPU on xsave processors
+ * or if the past 5 consecutive context-switches used math.
+ */
+ preload = static_cpu_has(X86_FEATURE_FPU) &&
+ new_fpu->fpstate_active &&
+ (use_eager_fpu() || new_fpu->counter > 5);
+
+ if (preload) {
+ prefetch(&new_fpu->state);
+ new_fpu->counter++;
+ __fpregs_activate(new_fpu);
+ trace_x86_fpu_regs_activated(new_fpu);
+
+ /* Don't change CR0.TS if we just switch! */
+ if (!__this_cpu_read(fpu_active)) {
+ __fpregs_activate_hw();
+ __this_cpu_write(fpu_active, true);
+ }
+
copy_kernel_to_fpregs(&new_fpu->state);
+ } else if (__this_cpu_read(fpu_active)) {
+ __this_cpu_write(fpu_active, false);
+ __fpregs_deactivate_hw();
+ }
}
/*
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 3fc03a09a93b..82cd46584528 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -39,6 +39,7 @@ static DEFINE_PER_CPU(bool, in_kernel_fpu);
* Track which context is using the FPU on the CPU:
*/
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
+DEFINE_PER_CPU(bool, fpu_active) = false;
static void kernel_fpu_disable(void)
{
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index d86be29c38c7..8cd2f42190dc 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -247,11 +247,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
- fpu_switch_t fpu_switch;
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
- fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
+ switch_fpu_prepare(prev_fpu, cpu);
/*
* Save away %gs. No need to save %fs, as it was saved on the
@@ -310,7 +309,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (prev->gs | next->gs)
lazy_load_gs(next->gs);
- switch_fpu_finish(next_fpu, fpu_switch);
+ switch_fpu_finish(next_fpu);
this_cpu_write(current_task, next_p);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 63236d8f84bf..92b9485a6a18 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -264,9 +264,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
unsigned prev_fsindex, prev_gsindex;
- fpu_switch_t fpu_switch;
- fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
+ switch_fpu_prepare(prev_fpu, cpu);
/* We must save %fs and %gs before load_TLS() because
* %fs and %gs may be cleared by load_TLS().
@@ -416,7 +415,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
prev->gsbase = 0;
prev->gsindex = prev_gsindex;
- switch_fpu_finish(next_fpu, fpu_switch);
+ switch_fpu_finish(next_fpu);
/*
* Switch the PDA and FPU contexts.
--
2.7.4