Message-ID: <20121211140502.23621.18600.stgit@srivatsabhat.in.ibm.com>
Date: Tue, 11 Dec 2012 19:35:03 +0530
From: "Srivatsa S. Bhat" <srivatsa.bhat@...ux.vnet.ibm.com>
To: tglx@...utronix.de, peterz@...radead.org,
paulmck@...ux.vnet.ibm.com, rusty@...tcorp.com.au,
mingo@...nel.org, akpm@...ux-foundation.org, namhyung@...nel.org,
vincent.guittot@...aro.org, tj@...nel.org, oleg@...hat.com
Cc: sbw@....edu, amit.kucheria@...aro.org, rostedt@...dmis.org,
rjw@...k.pl, srivatsa.bhat@...ux.vnet.ibm.com,
wangyun@...ux.vnet.ibm.com, xiaoguangrong@...ux.vnet.ibm.com,
nikunj@...ux.vnet.ibm.com, linux-pm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH v4 5/9] sched, cpu hotplug: Use stable online cpus in try_to_wake_up() & select_task_rq()

Once stop_machine() is gone from the CPU offline path, we won't be able to
depend on preempt_disable() to prevent CPUs from going offline from under us.

Use the get/put_online_cpus_atomic() APIs to prevent CPUs from going offline
while we are in atomic context.
Scheduler functions such as try_to_wake_up() and select_task_rq() (and even
select_fallback_rq()) deal with picking new CPUs to run tasks. So they need
to synchronize with CPU offline operations.
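
For reference, the usage pattern boils down to something like this (an
illustrative sketch only, not part of the patch; wake_task_example() is a
made-up caller of the APIs introduced earlier in this series):

	/* Sketch: pin the online-CPU state around an atomic section
	 * that picks a CPU, instead of relying on preempt_disable(). */
	static void wake_task_example(struct task_struct *p)
	{
		get_online_cpus_atomic();  /* CPUs can't go offline past here */

		/* A CPU picked here (e.g. via select_task_rq()) is
		 * guaranteed to stay online until the matching put. */

		put_online_cpus_atomic(); /* CPU offline can proceed again */
	}
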
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@...ux.vnet.ibm.com>
---

 kernel/sched/core.c |   14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2d8927f..f51e0aa 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1103,6 +1103,10 @@ EXPORT_SYMBOL_GPL(kick_process);
 #ifdef CONFIG_SMP
 /*
  * ->cpus_allowed is protected by both rq->lock and p->pi_lock
+ *
+ * Must be called under get/put_online_cpus_atomic() or
+ * equivalent, to prevent CPUs from going offline from underneath
+ * us.
  */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
@@ -1166,6 +1170,9 @@ out:
 
 /*
  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
+ *
+ * Must be called under get/put_online_cpus_atomic(), to prevent
+ * CPUs from going offline from underneath us.
  */
 static inline
 int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
@@ -1406,6 +1413,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	int cpu, success = 0;
 
 	smp_wmb();
+	get_online_cpus_atomic();
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	if (!(p->state & state))
 		goto out;
@@ -1446,6 +1454,7 @@ stat:
 	ttwu_stat(p, cpu, wake_flags);
 out:
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+	put_online_cpus_atomic();
 
 	return success;
 }
@@ -1624,6 +1633,7 @@ void wake_up_new_task(struct task_struct *p)
 	unsigned long flags;
 	struct rq *rq;
 
+	get_online_cpus_atomic();
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 #ifdef CONFIG_SMP
 	/*
@@ -1644,6 +1654,7 @@ void wake_up_new_task(struct task_struct *p)
 		p->sched_class->task_woken(rq, p);
 #endif
 	task_rq_unlock(rq, p, &flags);
+	put_online_cpus_atomic();
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -2541,6 +2552,7 @@ void sched_exec(void)
 	unsigned long flags;
 	int dest_cpu;
 
+	get_online_cpus_atomic();
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
 	if (dest_cpu == smp_processor_id())
@@ -2550,11 +2562,13 @@ void sched_exec(void)
 		struct migration_arg arg = { p, dest_cpu };
 
 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		put_online_cpus_atomic();
 		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
 		return;
 	}
 unlock:
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+	put_online_cpus_atomic();
 }
 
 #endif
--