Cache the divide in the hope we can avoid doing it in steady state operation.

Signed-off-by: Peter Zijlstra
---
 kernel/sched.c       |    2 ++
 kernel/sched_debug.c |    1 +
 kernel/sched_fair.c  |    6 ++----
 3 files changed, 5 insertions(+), 4 deletions(-)

Index: linux-2.6-2/kernel/sched.c
===================================================================
--- linux-2.6-2.orig/kernel/sched.c
+++ linux-2.6-2/kernel/sched.c
@@ -1298,11 +1298,13 @@ calc_delta_mine(unsigned long delta_exec
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
 	lw->weight += inc;
+	lw->inv_weight = 0;
 }
 
 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
 {
 	lw->weight -= dec;
+	lw->inv_weight = 0;
 }
 
 /*
Index: linux-2.6-2/kernel/sched_debug.c
===================================================================
--- linux-2.6-2.orig/kernel/sched_debug.c
+++ linux-2.6-2/kernel/sched_debug.c
@@ -137,6 +137,7 @@ void print_cfs_rq(struct seq_file *m, in
 			SPLIT_NS(spread0));
 	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
+	SEQ_printf(m, "  .%-30s: %ld\n", "load^-1", cfs_rq->load.inv_weight);
 #ifdef CONFIG_SCHEDSTATS
 	SEQ_printf(m, "  .%-30s: %d\n", "bkl_count", rq->bkl_count);
Index: linux-2.6-2/kernel/sched_fair.c
===================================================================
--- linux-2.6-2.orig/kernel/sched_fair.c
+++ linux-2.6-2/kernel/sched_fair.c
@@ -369,10 +369,8 @@ static u64 sched_slice(struct cfs_rq *cf
 		goto out;
 
 	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-
-		slice *= se->load.weight;
-		do_div(slice, cfs_rq->load.weight);
+		slice = calc_delta_mine(slice,
+				se->load.weight, &cfs_rq_of(se)->load);
 	}
 out:
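
The reason clearing ->inv_weight on every weight change is sufficient: calc_delta_mine() computes delta * weight / lw->weight through a cached fixed-point reciprocal, and only performs a real division when lw->inv_weight is zero. Below is a minimal userspace sketch of that pattern, not the kernel's exact code: it simplifies the arithmetic (truncating reciprocal, no overflow handling or rounding), and calc_delta() plus the values in main() are purely illustrative.

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Sketch of a cached fixed-point reciprocal.  The divide runs only
	 * when the cached inverse is stale (inv_weight == 0); steady-state
	 * callers pay a 64-bit multiply and a shift.
	 */
	#define WMULT_SHIFT	32
	#define WMULT_CONST	((1ULL << WMULT_SHIFT) - 1)

	struct load_weight {
		unsigned long weight;
		unsigned long inv_weight;	/* ~2^32 / weight; 0 means stale */
	};

	static void update_load_add(struct load_weight *lw, unsigned long inc)
	{
		lw->weight += inc;
		lw->inv_weight = 0;	/* weight changed: drop the cached inverse */
	}

	static unsigned long
	calc_delta(unsigned long delta, unsigned long weight, struct load_weight *lw)
	{
		/* Lazily recompute the inverse; the only divide on this path. */
		if (!lw->inv_weight)
			lw->inv_weight = (unsigned long)(WMULT_CONST / lw->weight);

		/* delta * weight / lw->weight, done as multiply + shift. */
		return (unsigned long)(((uint64_t)delta * weight *
					lw->inv_weight) >> WMULT_SHIFT);
	}

	int main(void)
	{
		struct load_weight lw = { .weight = 3072, .inv_weight = 0 };

		printf("%lu\n", calc_delta(4000000, 1024, &lw)); /* divides once */
		printf("%lu\n", calc_delta(4000000, 1024, &lw)); /* cache hit */

		update_load_add(&lw, 1024);			 /* invalidates */
		printf("%lu\n", calc_delta(4000000, 1024, &lw)); /* divides again */
		return 0;
	}

With this shape the division runs once per weight change rather than once per sched_slice() call, which is what the changelog means by avoiding the divide in steady state.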