Refactor the global load updates from update_shares_cpu() so that
update_cfs_load() can update global load when it is more than ~10% out of
sync.

The new global_update parameter allows us to force an update, regardless of
the error factor, so that we can synchronize with update_shares().

Signed-off-by: Paul Turner
---
 kernel/sched_fair.c |   42 ++++++++++++++++++++++++++++--------------
 1 file changed, 28 insertions(+), 14 deletions(-)

Index: kernel/sched_fair.c
===================================================================
--- kernel/sched_fair.c.orig
+++ kernel/sched_fair.c
@@ -538,7 +538,7 @@ static u64 sched_vslice(struct cfs_rq *c
 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
 }
 
-static void update_cfs_load(struct cfs_rq *cfs_rq);
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
 static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
 
 /*
@@ -563,7 +563,7 @@ __update_curr(struct cfs_rq *cfs_rq, str
 
 	cfs_rq->load_unacc_exec_time += delta_exec;
 	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq, 0);
 	}
 }
@@ -701,7 +701,22 @@ account_entity_dequeue(struct cfs_rq *cf
 }
 
 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
-static void update_cfs_load(struct cfs_rq *cfs_rq)
+static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
+					    int global_update)
+{
+	struct task_group *tg = cfs_rq->tg;
+	long load_avg;
+
+	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
+	load_avg -= cfs_rq->load_contribution;
+
+	if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
+		atomic_add(load_avg, &tg->load_weight);
+		cfs_rq->load_contribution += load_avg;
+	}
+}
+
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 	u64 period = sysctl_sched_shares_window;
 	u64 now, delta;
@@ -728,6 +743,11 @@ static void update_cfs_load(struct cfs_r
 		cfs_rq->load_avg += delta * load;
 	}
 
+	/* consider updating load contribution on each fold or truncate */
+	if (global_update || cfs_rq->load_period > period
+	    || !cfs_rq->load_period)
+		update_cfs_rq_load_contribution(cfs_rq, global_update);
+
 	while (cfs_rq->load_period > period) {
 		/*
 		 * Inline assembly required to prevent the compiler
@@ -917,7 +937,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, st
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	update_cfs_load(cfs_rq);
+	update_cfs_load(cfs_rq, 0);
 	update_cfs_shares(cfs_rq_of(se), se->load.weight);
 	account_entity_enqueue(cfs_rq, se);
 
@@ -978,7 +998,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	se->on_rq = 0;
-	update_cfs_load(cfs_rq);
+	update_cfs_load(cfs_rq, 0);
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
 	update_cfs_shares(cfs_rq_of(se), 0);
@@ -1213,7 +1233,7 @@ enqueue_task_fair(struct rq *rq, struct
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq, 0);
 	}
 
@@ -1243,7 +1263,7 @@ static void dequeue_task_fair(struct rq
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq, 0);
 	}
 
@@ -2045,7 +2065,6 @@ static int update_shares_cpu(struct task
 	struct cfs_rq *cfs_rq;
 	unsigned long flags;
 	struct rq *rq;
-	long load_avg;
 
 	if (!tg->se[cpu])
 		return 0;
@@ -2056,12 +2075,7 @@ static int update_shares_cpu(struct task
 	raw_spin_lock_irqsave(&rq->lock, flags);
 	update_rq_clock(rq);
 
-	update_cfs_load(cfs_rq);
-
-	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
-	load_avg -= cfs_rq->load_contribution;
-	atomic_add(load_avg, &tg->load_weight);
-	cfs_rq->load_contribution += load_avg;
+	update_cfs_load(cfs_rq, 1);
 
 	/*
 	 * We need to update shares after updating tg->load_weight in
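For readers following along, the deferred-update criterion is easy to model
outside the kernel. Below is a minimal userspace sketch of the same
rate-limited folding, assuming C11 atomics; global_load, struct cpu_load and
update_contribution() are illustrative stand-ins for tg->load_weight, the
per-cpu cfs_rq fields and update_cfs_rq_load_contribution(), not kernel code:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Shared sum of per-cpu contributions: stand-in for tg->load_weight. */
static atomic_long global_load = 1000;

struct cpu_load {
	long load_avg;		/* locally maintained load average */
	long contribution;	/* portion last folded into global_load */
};

/*
 * Fold the local average into the global sum only when forced
 * (global_update) or when the accumulated error exceeds ~1/8 of the
 * previously published contribution -- the patch's error threshold.
 */
static void update_contribution(struct cpu_load *cl, int global_update)
{
	long delta = cl->load_avg - cl->contribution;

	if (global_update || labs(delta) > cl->contribution / 8) {
		atomic_fetch_add(&global_load, delta);
		cl->contribution += delta;
	}
}

int main(void)
{
	struct cpu_load cl = { .load_avg = 1000, .contribution = 1000 };

	cl.load_avg = 1100;	/* 10% drift: under 1/8, global stays stale */
	update_contribution(&cl, 0);
	printf("10%% drift : global=%ld\n", atomic_load(&global_load));

	cl.load_avg = 1200;	/* 20% drift: over 1/8, folded globally */
	update_contribution(&cl, 0);
	printf("20%% drift : global=%ld\n", atomic_load(&global_load));

	cl.load_avg = 1210;	/* forced sync, as update_shares_cpu() does */
	update_contribution(&cl, 1);
	printf("forced    : global=%ld\n", atomic_load(&global_load));
	return 0;
}

Built with gcc -std=c11, this prints 1000, 1200 and 1210: the 10% drift stays
local (below the contribution/8 threshold), the 20% drift is folded into the
global sum, and the final forced pass mirrors the update_cfs_load(cfs_rq, 1)
call from update_shares_cpu().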