Message-Id: <1280467146-32218-5-git-send-email-ncrao@google.com>
Date: Thu, 29 Jul 2010 22:19:04 -0700
From: Nikhil Rao <ncrao@...gle.com>
To: Ingo Molnar <mingo@...e.hu>, Peter Zijlstra <peterz@...radead.org>,
Mike Galbraith <efault@....de>, linux-kernel@...r.kernel.org
Cc: Venkatesh Pallipadi <venki@...gle.com>,
Ken Chen <kenchen@...gle.com>, Paul Turner <pjt@...gle.com>,
Nikhil Rao <ncrao@...gle.com>
Subject: [PATCH 4/6] sched: add sched_idle_balance argument to lb functions
This patch adds a sched_idle_balance argument to move_tasks(),
load_balance_fair() and balance_tasks(). The argument lets these functions
distinguish a SCHED_NORMAL/SCHED_BATCH load-balancing pass from a
SCHED_IDLE one.
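
This patch only threads the flag through; it does not yet act on it. As a
rough illustration of the intent, the standalone (userspace) sketch below
shows how such a flag can gate which tasks a balance pass will consider.
Everything in it (can_pull(), the policy enum, the task list) is made up
for the example and is not kernel code:

#include <stdio.h>

enum policy { POLICY_NORMAL, POLICY_BATCH, POLICY_IDLE };

struct task {
	const char *comm;
	enum policy policy;
};

/* Decide whether a balance pass of the given kind may pull this task. */
static int can_pull(const struct task *t, int sched_idle_balance)
{
	if (sched_idle_balance)
		return t->policy == POLICY_IDLE;
	return t->policy == POLICY_NORMAL || t->policy == POLICY_BATCH;
}

int main(void)
{
	const struct task tasks[] = {
		{ "worker",  POLICY_NORMAL },
		{ "encoder", POLICY_BATCH  },
		{ "scrub",   POLICY_IDLE   },
	};
	int i;

	/* A SCHED_IDLE pass (flag = 1) would only pick "scrub". */
	for (i = 0; i < 3; i++)
		printf("%-8s normal-pass=%d idle-pass=%d\n",
		       tasks[i].comm,
		       can_pull(&tasks[i], 0), can_pull(&tasks[i], 1));
	return 0;
}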
Signed-off-by: Nikhil Rao <ncrao@...gle.com>
---
 kernel/sched_fair.c |   18 ++++++++++--------
 1 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0e25e51..cb270e8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1861,7 +1861,8 @@ static unsigned long
 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      unsigned long max_load_move, struct sched_domain *sd,
 	      enum cpu_idle_type idle, int *all_pinned,
-	      int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
+	      int *this_best_prio, struct cfs_rq *busiest_cfs_rq,
+	      int sched_idle_balance)
 {
 	int loops = 0, pulled = 0, pinned = 0;
 	long rem_load_move = max_load_move;
@@ -1923,7 +1924,7 @@ static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_load_move,
 		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *all_pinned, int *this_best_prio)
+		  int *all_pinned, int *this_best_prio, int sched_idle_balance)
 {
 	long rem_load_move = max_load_move;
 	int busiest_cpu = cpu_of(busiest);
@@ -1949,7 +1950,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
 		moved_load = balance_tasks(this_rq, this_cpu, busiest,
 				rem_load, sd, idle, all_pinned, this_best_prio,
-				busiest_cfs_rq);
+				busiest_cfs_rq, sched_idle_balance);
 
 		if (!moved_load)
 			continue;
@@ -1970,11 +1971,11 @@ static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_load_move,
 		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *all_pinned, int *this_best_prio)
+		  int *all_pinned, int *this_best_prio, int sched_idle_balance)
 {
 	return balance_tasks(this_rq, this_cpu, busiest,
 			max_load_move, sd, idle, all_pinned,
-			this_best_prio, &busiest->cfs);
+			this_best_prio, &busiest->cfs, sched_idle_balance);
 }
 #endif
 
@@ -1988,7 +1989,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      unsigned long max_load_move,
 		      struct sched_domain *sd, enum cpu_idle_type idle,
-		      int *all_pinned)
+		      int *all_pinned, int sched_idle_balance)
 {
 	unsigned long total_load_moved = 0, load_moved;
 	int this_best_prio = this_rq->curr->prio;
@@ -1996,7 +1997,8 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	do {
 		load_moved = load_balance_fair(this_rq, this_cpu, busiest,
 				max_load_move - total_load_moved,
-				sd, idle, all_pinned, &this_best_prio);
+				sd, idle, all_pinned, &this_best_prio,
+				sched_idle_balance);
 
 		total_load_moved += load_moved;
 
@@ -2878,7 +2880,7 @@ redo:
 		local_irq_save(flags);
 		double_rq_lock(this_rq, busiest);
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
-				      imbalance, sd, idle, &all_pinned);
+				      imbalance, sd, idle, &all_pinned, 0);
 		double_rq_unlock(this_rq, busiest);
 		local_irq_restore(flags);
 
--
1.7.1
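
For completeness: as the final hunk shows, the only existing caller, in
load_balance(), passes 0, so NORMAL/BATCH balancing is unchanged by this
patch. A SCHED_IDLE pass would presumably pass 1, wired up elsewhere in
the series; sketched under that assumption:

	/* existing call in load_balance(): a NORMAL/BATCH pass */
	ld_moved = move_tasks(this_rq, this_cpu, busiest,
			      imbalance, sd, idle, &all_pinned, 0);

	/* hypothetical SCHED_IDLE pass -- not added by this patch */
	ld_moved = move_tasks(this_rq, this_cpu, busiest,
			      imbalance, sd, idle, &all_pinned, 1);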