It's a bit awkward, but it was the least painful way of modifying the queue selection. Used in a later patch to conditionally use a random queue.

Signed-off-by: Peter Zijlstra
Cc: Paul Turner
Cc: Lee Schermerhorn
Cc: Christoph Lameter
Cc: Rik van Riel
Cc: Andrew Morton
Cc: Linus Torvalds
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c |   20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

Index: tip/kernel/sched/fair.c
===================================================================
--- tip.orig/kernel/sched/fair.c
+++ tip/kernel/sched/fair.c
@@ -3063,6 +3063,9 @@ struct lb_env {
 	unsigned int		loop;
 	unsigned int		loop_break;
 	unsigned int		loop_max;
+
+	struct rq *		(*find_busiest_queue)(struct lb_env *,
+						      struct sched_group *);
 };
 
 /*
@@ -4236,13 +4239,14 @@ static int load_balance(int this_cpu, st
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
 	struct lb_env env = {
-		.sd		= sd,
-		.dst_cpu	= this_cpu,
-		.dst_rq		= this_rq,
-		.dst_grpmask	= sched_group_cpus(sd->groups),
-		.idle		= idle,
-		.loop_break	= sched_nr_migrate_break,
-		.cpus		= cpus,
+		.sd			= sd,
+		.dst_cpu		= this_cpu,
+		.dst_rq			= this_rq,
+		.dst_grpmask		= sched_group_cpus(sd->groups),
+		.idle			= idle,
+		.loop_break		= sched_nr_migrate_break,
+		.cpus			= cpus,
+		.find_busiest_queue	= find_busiest_queue,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
@@ -4261,7 +4265,7 @@ redo:
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(&env, group);
+	busiest = env.find_busiest_queue(&env, group);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;
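
For illustration only (not part of this patch): the point of the hook is that callers can swap the queue-selection policy by assigning a different function to env.find_busiest_queue before balancing. The following is a minimal, self-contained userspace sketch of that pattern; the types, field layout, and the pick_busiest/pick_random names are simplified stand-ins, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel structures; illustrative only. */
struct rq {
	int cpu;
	unsigned long load;
};

struct sched_group {
	struct rq *rqs;
	int nr;
};

struct lb_env {
	struct sched_group *group;
	/* Pluggable selection hook, mirroring what the patch adds to lb_env. */
	struct rq *(*find_busiest_queue)(struct lb_env *, struct sched_group *);
};

/* Default policy: pick the most loaded queue. */
static struct rq *pick_busiest(struct lb_env *env, struct sched_group *sg)
{
	struct rq *busiest = NULL;
	int i;

	for (i = 0; i < sg->nr; i++)
		if (!busiest || sg->rqs[i].load > busiest->load)
			busiest = &sg->rqs[i];

	return busiest;
}

/* Alternative policy: pick a queue at random. */
static struct rq *pick_random(struct lb_env *env, struct sched_group *sg)
{
	return &sg->rqs[rand() % sg->nr];
}

int main(void)
{
	struct rq rqs[] = { { 0, 10 }, { 1, 30 }, { 2, 20 } };
	struct sched_group sg = { rqs, 3 };
	int use_random = 0;	/* flip to 1 to exercise the random policy */

	struct lb_env env = {
		.group			= &sg,
		.find_busiest_queue	= use_random ? pick_random : pick_busiest,
	};

	struct rq *busiest = env.find_busiest_queue(&env, &sg);

	printf("selected cpu=%d load=%lu\n", busiest->cpu, busiest->load);
	return 0;
}

The call site never changes; only the initializer does, which is why a single indirect call in load_balance() is enough to let a later patch choose a random queue conditionally.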