Message-ID: <alpine.DEB.2.00.1511161413270.7097@tp.orcam.me.uk>
Date: Mon, 16 Nov 2015 14:34:47 +0000
From: "Maciej W. Rozycki" <macro@...tec.com>
To: <linux-mips@...ux-mips.org>
CC: Ralf Baechle <ralf@...ux-mips.org>,
Matthew Fortune <Matthew.Fortune@...tec.com>,
Daniel Sanders <Daniel.Sanders@...tec.com>,
Leonid Yegoshin <Leonid.Yegoshin@...tec.com>,
<linux-kernel@...r.kernel.org>
Subject: [RFC PATCH 2/4] MIPS: Factor out FP context preemption
Signed-off-by: Maciej W. Rozycki <macro@...tec.com>
---
Following the discussion around commit 9791554b ("MIPS,prctl: add
PR_[GS]ET_FP_MODE prctl options for MIPS"), also available at
<http://patchwork.linux-mips.org/patch/8899/>, and Leonid's observation at
<http://www.linux-mips.org/cgi-bin/mesg.cgi?a=linux-mips&i=54B02115.7090609%40imgtec.com>
in particular, I agree this would best be done with an IPI. However, such
an improvement is independent of the changes made as part of this series,
so I have taken the minimal approach and left the solution implemented so
far unchanged, especially as Leonid says he already has a patch available.
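For the record, an IPI-based variant might look roughly like the sketch
below (untested, for illustration only, with made-up helper names; not to
be confused with Leonid's patch):

static void mips_drop_fp_on_cpu(void *info)
{
	struct mm_struct *mm = info;

	/* Save and disable FPU & MSA if the task running here uses this mm. */
	if (current->mm == mm)
		lose_fpu(1);
}

static void mips_get_fp_context_ipi(struct task_struct *task)
{
	/* Prevent any threads from obtaining live FP context. */
	atomic_set(&task->mm->context.fp_mode_switching, 1);
	smp_mb__after_atomic();

	/* Have every online CPU drop any live FP context for this mm. */
	on_each_cpu(mips_drop_fp_on_cpu, task->mm, 1);
}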
linux-mips-process-fp-context.diff
Index: linux-sfr-test/arch/mips/kernel/process.c
===================================================================
--- linux-sfr-test.orig/arch/mips/kernel/process.c 2015-11-13 00:36:09.885716000 +0000
+++ linux-sfr-test/arch/mips/kernel/process.c 2015-11-16 13:50:18.962058000 +0000
@@ -570,6 +570,60 @@ void arch_trigger_all_cpu_backtrace(bool
 	smp_call_function(arch_dump_stack, NULL, 1);
 }
 
+/*
+ * Make the FP context available for mode changes.
+ */
+static void mips_get_fp_context(struct task_struct *task)
+{
+	unsigned long switch_count;
+	struct task_struct *t;
+
+	/* Save FP & vector context, then disable FPU & MSA. */
+	if (task->signal == current->signal)
+		lose_fpu(1);
+
+	/* Prevent any threads from obtaining live FP context. */
+	atomic_set(&task->mm->context.fp_mode_switching, 1);
+	smp_mb__after_atomic();
+
+	/*
+	 * If there are multiple online CPUs then wait until all threads
+	 * whose FP mode is about to change have been context switched.
+	 * This approach allows us to only worry about whether an FP mode
+	 * switch is in progress when FP is first used in a task's time
+	 * slice. Pretty much all of the mode switch overhead can thus
+	 * be confined to cases where mode switches are actually occurring.
+	 * That is, to here. However, for the thread performing the mode
+	 * switch it may take a while...
+	 */
+	if (num_online_cpus() > 1) {
+		spin_lock_irq(&task->sighand->siglock);
+
+		for_each_thread(task, t) {
+			if (t == current)
+				continue;
+
+			switch_count = t->nvcsw + t->nivcsw;
+
+			do {
+				spin_unlock_irq(&task->sighand->siglock);
+				cond_resched();
+				spin_lock_irq(&task->sighand->siglock);
+			} while ((t->nvcsw + t->nivcsw) == switch_count);
+		}
+
+		spin_unlock_irq(&task->sighand->siglock);
+	}
+}
+
+/*
+ * Allow threads to use FP again.
+ */
+static void mips_put_fp_context(struct task_struct *task)
+{
+	atomic_set(&task->mm->context.fp_mode_switching, 0);
+}
+
 int mips_get_process_fp_mode(struct task_struct *task)
 {
 	int value = 0;
@@ -585,7 +639,6 @@ int mips_get_process_fp_mode(struct task
 int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 {
 	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
-	unsigned long switch_count;
 	struct task_struct *t;
 
 	/* Check the value is valid */
@@ -603,41 +656,7 @@ int mips_set_process_fp_mode(struct task
 	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
 		return -EOPNOTSUPP;
 
-	/* Save FP & vector context, then disable FPU & MSA */
-	if (task->signal == current->signal)
-		lose_fpu(1);
-
-	/* Prevent any threads from obtaining live FP context */
-	atomic_set(&task->mm->context.fp_mode_switching, 1);
-	smp_mb__after_atomic();
-
-	/*
-	 * If there are multiple online CPUs then wait until all threads whose
-	 * FP mode is about to change have been context switched. This approach
-	 * allows us to only worry about whether an FP mode switch is in
-	 * progress when FP is first used in a tasks time slice. Pretty much all
-	 * of the mode switch overhead can thus be confined to cases where mode
-	 * switches are actually occuring. That is, to here. However for the
-	 * thread performing the mode switch it may take a while...
-	 */
-	if (num_online_cpus() > 1) {
-		spin_lock_irq(&task->sighand->siglock);
-
-		for_each_thread(task, t) {
-			if (t == current)
-				continue;
-
-			switch_count = t->nvcsw + t->nivcsw;
-
-			do {
-				spin_unlock_irq(&task->sighand->siglock);
-				cond_resched();
-				spin_lock_irq(&task->sighand->siglock);
-			} while ((t->nvcsw + t->nivcsw) == switch_count);
-		}
-
-		spin_unlock_irq(&task->sighand->siglock);
-	}
+	mips_get_fp_context(task);
 
 	/*
 	 * There are now no threads of the process with live FP context, so it
@@ -659,8 +678,7 @@ int mips_set_process_fp_mode(struct task
 			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
 	}
 
-	/* Allow threads to use FP again */
-	atomic_set(&task->mm->context.fp_mode_switching, 0);
+	mips_put_fp_context(task);
 
 	return 0;
 }
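
Note, not part of this patch: the consumer side of `fp_mode_switching'
lives in the FP-use exception path and conceptually amounts to something
like the simplified sketch below (illustration only, not the actual
kernel code):

	/*
	 * Before acquiring live FP context a thread has to wait for any
	 * in-flight mode switch on its mm, i.e. until
	 * mips_put_fp_context() has cleared the flag.
	 */
	while (atomic_read(&current->mm->context.fp_mode_switching))
		cond_resched();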
--