Message-ID: <20161026114522.GH3102@twins.programming.kicks-ass.net>
Date: Wed, 26 Oct 2016 13:45:22 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Vincent Guittot <vincent.guittot@...aro.org>
Cc: mingo@...nel.org, linux-kernel@...r.kernel.org,
dietmar.eggemann@....com, yuyang.du@...el.com,
Morten.Rasmussen@....com, linaro-kernel@...ts.linaro.org,
pjt@...gle.com, bsegall@...gle.com, kernellwp@...il.com
Subject: Re: [PATCH 4/6 v5] sched: propagate load during synchronous
attach/detach

On Mon, Oct 17, 2016 at 11:14:11AM +0200, Vincent Guittot wrote:
> +/* Take into account change of load of a child task group */
> +static inline void
> +update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se)
> +{
> + struct cfs_rq *gcfs_rq = group_cfs_rq(se);
> + long delta, load = gcfs_rq->avg.load_avg;
> +
> + /*
> + * If the load of the group cfs_rq is zero, the load of the
> + * sched_entity will also be zero, so we can skip the formula.
> + */
Does it make sense to do:

	if (!load)
		goto no_load;

and avoid the indent?
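That is, the body would read something like this (untested sketch of the
same code restructured with the early exit; the tg_load declaration gets
hoisted out of the block):

	long delta, tg_load, load = gcfs_rq->avg.load_avg;

	if (!load)
		goto no_load;

	/* Get tg's load and ensure tg_load > 0 */
	tg_load = atomic_long_read(&gcfs_rq->tg->load_avg) + 1;

	/* Replace tg's stale contribution with the current load */
	tg_load -= gcfs_rq->tg_load_avg_contrib;
	tg_load += load;

	if (tg_load > scale_load_down(gcfs_rq->tg->shares)) {
		/* Scale gcfs_rq's load into tg's shares */
		load *= scale_load_down(gcfs_rq->tg->shares);
		load /= tg_load;
	}

no_load:
	delta = load - se->avg.load_avg;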
> + if (load) {
> + long tg_load;
> +
> + /* Get tg's load and ensure tg_load > 0 */
> + tg_load = atomic_long_read(&gcfs_rq->tg->load_avg) + 1;
> +
> + /* Ensure tg_load >= load and that it reflects the current load */
> + tg_load -= gcfs_rq->tg_load_avg_contrib;
> + tg_load += load;
> +
> + /*
> + * We need to compute a correction term in the case that the
> + * task group is consuming more cpu than a task of equal
> + * weight. A task with a weight equal to tg->shares will have
> + * a load less than or equal to scale_load_down(tg->shares).
> + *
> + * Similarly, the sched_entities that represent the task group
> + * at the parent level can't have a load higher than
> + * scale_load_down(tg->shares), and the sum of the sched_entities'
> + * loads must be <= scale_load_down(tg->shares).
> + */
> + if (tg_load > scale_load_down(gcfs_rq->tg->shares)) {
> + /* Scale gcfs_rq's load into tg's shares */
> + load *= scale_load_down(gcfs_rq->tg->shares);
> + load /= tg_load;
> + }
> + }
> +
no_load:
> + delta = load - se->avg.load_avg;
> +
> + /* Nothing to update */
> + if (!delta)
> + return;
> +
> + /* Set new sched_entity's load */
> + se->avg.load_avg = load;
> + se->avg.load_sum = se->avg.load_avg * LOAD_AVG_MAX;
> +
> + /* Update parent cfs_rq load */
> + add_positive(&cfs_rq->avg.load_avg, delta);
> + cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * LOAD_AVG_MAX;
> +
> + /*
> + * If the sched_entity is already enqueued, we also have to update the
> + * runnable load avg.
> + */
> + if (se->on_rq) {
> + /* Update parent cfs_rq runnable_load_avg */
> + add_positive(&cfs_rq->runnable_load_avg, delta);
> + cfs_rq->runnable_load_sum = cfs_rq->runnable_load_avg * LOAD_AVG_MAX;
> + }
> +}
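FWIW, a worked example of that clamp (made-up numbers, assuming
scale_load_down() is a no-op and ignoring the +1 guard for simplicity):
take tg->shares = 1024 and a group whose cfs_rqs on two CPUs each carry
load_avg = 1024. For either of them:

	tg_load = 2048 (global) - 1024 (stale contrib) + 1024 (current)
		= 2048

which is > tg->shares, so:

	load = 1024 * 1024 / 2048 = 512

and the group se on each rq ends up with half the shares' worth of
load, keeping the sum across CPUs <= scale_load_down(tg->shares).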