Message-ID: <20071204204521.3567.2646.stgit@novell1.haskins.net>
Date: Tue, 04 Dec 2007 15:45:21 -0500
From: Gregory Haskins <ghaskins@...ell.com>
To: mingo@...e.hu
Cc: rostedt@...dmis.org, ghaskins@...ell.com,
linux-rt-users@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH 11/23] SCHED - Break out the search function

Isolate the search logic into its own function so that it can be used
later in places other than find_lock_lowest_rq().

(The checkpatch error is inherited from the moved code.)
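
The net effect is that find_lowest_rq() becomes a pure, lock-free
search that returns a CPU id (or -1), while find_lock_lowest_rq()
keeps the lock/recheck/retry policy. Purely as illustration, here is a
self-contained user-space sketch of that split (the toy_* names and
setup are hypothetical, not kernel code; pthread mutexes stand in for
rq->lock, and a plain lock-then-recheck stands in for
double_lock_balance()):

/* toy_push.c - build with: gcc toy_push.c -o toy_push -lpthread */
#include <stdio.h>
#include <pthread.h>

#define NR_CPUS		4
#define MAX_RT_PRIO	100	/* prio values >= this mean "no RT task" */
#define RT_MAX_TRIES	3

struct toy_rq {
	pthread_mutex_t lock;
	int highest_prio;	/* numerically lower value == higher RT prio */
};

static struct toy_rq runqueues[NR_CPUS];

/*
 * Pure search, no locking: return the CPU whose rq runs the lowest
 * priority work (largest prio value), or -1 if none beats the task.
 */
static int toy_find_lowest_rq(int task_prio, int task_cpu)
{
	struct toy_rq *lowest = NULL;
	int cpu, best = -1;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		struct toy_rq *rq = &runqueues[cpu];

		if (cpu == task_cpu)
			continue;

		/* A CPU running no RT task at all is always good enough. */
		if (rq->highest_prio >= MAX_RT_PRIO)
			return cpu;

		if (rq->highest_prio > task_prio &&
		    (!lowest || rq->highest_prio > lowest->highest_prio)) {
			lowest = rq;
			best = cpu;
		}
	}
	return best;
}

/*
 * The caller keeps the locking policy: search, lock, recheck, and
 * retry a bounded number of times if the target rq raced with us.
 */
static struct toy_rq *toy_find_lock_lowest_rq(int task_prio, int task_cpu)
{
	int tries, cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = toy_find_lowest_rq(task_prio, task_cpu);
		if (cpu == -1)
			break;

		pthread_mutex_lock(&runqueues[cpu].lock);
		if (runqueues[cpu].highest_prio > task_prio)
			return &runqueues[cpu];	/* still valid, and locked */

		/* rq got busier while we took the lock; search again */
		pthread_mutex_unlock(&runqueues[cpu].lock);
	}
	return NULL;
}

int main(void)
{
	struct toy_rq *rq;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		pthread_mutex_init(&runqueues[cpu].lock, NULL);
		runqueues[cpu].highest_prio = 40 + cpu * 20; /* 40,60,80,100 */
	}

	rq = toy_find_lock_lowest_rq(50, 0); /* push a prio-50 task off CPU 0 */
	if (rq) {
		printf("target rq: highest_prio=%d\n", rq->highest_prio);
		pthread_mutex_unlock(&rq->lock);
	}
	return 0;
}

Keeping the retry loop in the caller is what lets later patches reuse
the search on its own, as the changelog intends.
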
Signed-off-by: Gregory Haskins <ghaskins@...ell.com>
Signed-off-by: Steven Rostedt <srostedt@...hat.com>
---
 kernel/sched_rt.c |   66 +++++++++++++++++++++++++++++++----------------------
 1 files changed, 39 insertions(+), 27 deletions(-)

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index da21c7a..7e26c2c 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -261,54 +261,66 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
 
-/* Will lock the rq it finds */
-static struct rq *find_lock_lowest_rq(struct task_struct *task,
-				      struct rq *this_rq)
+static int find_lowest_rq(struct task_struct *task)
 {
-	struct rq *lowest_rq = NULL;
 	int cpu;
-	int tries;
 	cpumask_t *cpu_mask = &__get_cpu_var(local_cpu_mask);
+	struct rq *lowest_rq = NULL;
 
 	cpus_and(*cpu_mask, cpu_online_map, task->cpus_allowed);
 
-	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
-		/*
-		 * Scan each rq for the lowest prio.
-		 */
-		for_each_cpu_mask(cpu, *cpu_mask) {
-			struct rq *rq = &per_cpu(runqueues, cpu);
+	/*
+	 * Scan each rq for the lowest prio.
+	 */
+	for_each_cpu_mask(cpu, *cpu_mask) {
+		struct rq *rq = cpu_rq(cpu);
 
-			if (cpu == this_rq->cpu)
-				continue;
+		if (cpu == task_cpu(task))
+			continue;
 
-			/* We look for lowest RT prio or non-rt CPU */
-			if (rq->rt.highest_prio >= MAX_RT_PRIO) {
-				lowest_rq = rq;
-				break;
-			}
+		/* We look for lowest RT prio or non-rt CPU */
+		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
+			lowest_rq = rq;
+			break;
+		}
 
-			/* no locking for now */
-			if (rq->rt.highest_prio > task->prio &&
-			    (!lowest_rq || rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
-				lowest_rq = rq;
-			}
+		/* no locking for now */
+		if (rq->rt.highest_prio > task->prio &&
+		    (!lowest_rq || rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
+			lowest_rq = rq;
 		}
+	}
+
+	return lowest_rq ? lowest_rq->cpu : -1;
+}
+
+/* Will lock the rq it finds */
+static struct rq *find_lock_lowest_rq(struct task_struct *task,
+				      struct rq *rq)
+{
+	struct rq *lowest_rq = NULL;
+	int cpu;
+	int tries;
 
-		if (!lowest_rq)
+	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
+		cpu = find_lowest_rq(task);
+
+		if (cpu == -1)
 			break;
 
+		lowest_rq = cpu_rq(cpu);
+
 		/* if the prio of this runqueue changed, try again */
-		if (double_lock_balance(this_rq, lowest_rq)) {
+		if (double_lock_balance(rq, lowest_rq)) {
 			/*
 			 * We had to unlock the run queue. In
 			 * the mean time, task could have
 			 * migrated already or had its affinity changed.
 			 * Also make sure that it wasn't scheduled on its rq.
 			 */
-			if (unlikely(task_rq(task) != this_rq ||
+			if (unlikely(task_rq(task) != rq ||
 				     !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
-				     task_running(this_rq, task) ||
+				     task_running(rq, task) ||
 				     !task->se.on_rq)) {
 				spin_unlock(&lowest_rq->lock);
 				lowest_rq = NULL;
--