It's a bit awkward but it was the least painful means of modifying the queue selection. Used in a later patch to conditionally use a random queue. Cc: Paul Turner Cc: Lee Schermerhorn Cc: Christoph Lameter Cc: Rik van Riel Cc: Andrew Morton Cc: Linus Torvalds Signed-off-by: Peter Zijlstra --- kernel/sched/fair.c | 19 ++++++++++++------- 1 files changed, 12 insertions(+), 7 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 22321db..9c4164e 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3074,6 +3074,10 @@ struct lb_env { unsigned int loop; unsigned int loop_break; unsigned int loop_max; + + struct rq * (*find_busiest_queue)(struct lb_env *, + struct sched_group *, + const struct cpumask *); }; /* @@ -4246,12 +4250,13 @@ static int load_balance(int this_cpu, struct rq *this_rq, struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); struct lb_env env = { - .sd = sd, - .dst_cpu = this_cpu, - .dst_rq = this_rq, - .dst_grpmask = sched_group_cpus(sd->groups), - .idle = idle, - .loop_break = sched_nr_migrate_break, + .sd = sd, + .dst_cpu = this_cpu, + .dst_rq = this_rq, + .dst_grpmask = sched_group_cpus(sd->groups), + .idle = idle, + .loop_break = sched_nr_migrate_break, + .find_busiest_queue = find_busiest_queue, }; cpumask_copy(cpus, cpu_active_mask); @@ -4270,7 +4275,7 @@ redo: goto out_balanced; } - busiest = find_busiest_queue(&env, group, cpus); + busiest = env.find_busiest_queue(&env, group, cpus); if (!busiest) { schedstat_inc(sd, lb_nobusyq[idle]); goto out_balanced; -- 1.7.2.3 -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/