Message-ID: <e9638a3f-baa0-4abb-bb61-481c539c2181@bytedance.com>
Date: Thu, 2 Nov 2023 14:27:33 +0800
From: Abel Wu <wuyun.abel@...edance.com>
To: s921975628@...il.com, mingo@...hat.com, peterz@...radead.org
Cc: vincent.guittot@...aro.org, dietmar.eggemann@....com,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH] sched/fair: Track current se's EEVDF parameters
On 11/2/23 1:44 AM, s921975628@...il.com wrote:
> From: Yiwei Lin <s921975628@...il.com>
>
> After dequeuing the currently-picked scheduling entity with
> `__dequeue_entity`, its contribution to the EEVDF parameters
> cfs_rq->avg_vruntime and cfs_rq->avg_load is also removed.
> Since these contributions should in fact still be considered by
> the EEVDF algorithm, curr is treated as a special case and its
> contribution is added back whenever cfs_rq->avg_vruntime or
> cfs_rq->avg_load is requested.
Being 'curr' means its vruntime is increasing, and so is its
contribution to avg_vruntime. And you failed to explain the
most important part: how to commit its contribution to
avg_vruntime (specifically in update_curr()).
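
For illustration only, a minimal (untested) sketch of that missing
step; the helper name below is made up:

	/*
	 * Hypothetical helper, not part of this patch: once curr's
	 * term is kept inside cfs_rq->avg_vruntime, every vruntime
	 * advance of curr must be committed to the aggregate too.
	 */
	static void avg_vruntime_advance_curr(struct cfs_rq *cfs_rq, s64 vdelta)
	{
		struct sched_entity *curr = cfs_rq->curr;

		/*
		 * avg_vruntime caches \Sum w_i * (v_i - v0), so when
		 * curr's vruntime moves forward by @vdelta its term
		 * grows by weight * vdelta.
		 */
		if (curr && curr->on_rq)
			cfs_rq->avg_vruntime +=
				scale_load_down(curr->load.weight) * vdelta;
	}

update_curr() would then call this right after it advances
curr->vruntime by calc_delta_fair(delta_exec, curr).
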
Regards,
Abel
>
> Functions like `entity_eligible`, which may be called inside a
> loop, therefore recalculate these statistics repeatedly, which
> costs extra effort. Instead, we could simply avoid removing
> these statistics from cfs_rq->avg_vruntime and cfs_rq->avg_load
> in the first place.
>
> Signed-off-by: Yiwei Lin <s921975628@...il.com>
> ---
> kernel/sched/fair.c | 34 +++++++---------------------------
> 1 file changed, 7 insertions(+), 27 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 876798824..d507ade09 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -655,17 +655,9 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
> */
> u64 avg_vruntime(struct cfs_rq *cfs_rq)
> {
> - struct sched_entity *curr = cfs_rq->curr;
> s64 avg = cfs_rq->avg_vruntime;
> long load = cfs_rq->avg_load;
>
> - if (curr && curr->on_rq) {
> - unsigned long weight = scale_load_down(curr->load.weight);
> -
> - avg += entity_key(cfs_rq, curr) * weight;
> - load += weight;
> - }
> -
> if (load) {
> /* sign flips effective floor / ceil */
> if (avg < 0)
> @@ -722,17 +714,9 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
> */
> int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
> {
> - struct sched_entity *curr = cfs_rq->curr;
> s64 avg = cfs_rq->avg_vruntime;
> long load = cfs_rq->avg_load;
>
> - if (curr && curr->on_rq) {
> - unsigned long weight = scale_load_down(curr->load.weight);
> -
> - avg += entity_key(cfs_rq, curr) * weight;
> - load += weight;
> - }
> -
> return avg >= entity_key(cfs_rq, se) * load;
> }
>
> @@ -821,11 +805,12 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
> __entity_less, &min_deadline_cb);
> }
>
> -static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
> +static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, bool on_rq)
> {
> rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
> &min_deadline_cb);
> - avg_vruntime_sub(cfs_rq, se);
> + if (!on_rq)
> + avg_vruntime_sub(cfs_rq, se);
> }
>
> struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
> @@ -3675,8 +3660,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
> /* commit outstanding execution time */
> if (cfs_rq->curr == se)
> update_curr(cfs_rq);
> - else
> - avg_vruntime_sub(cfs_rq, se);
> + avg_vruntime_sub(cfs_rq, se);
> update_load_sub(&cfs_rq->load, se->load.weight);
> }
> dequeue_load_avg(cfs_rq, se);
> @@ -3712,8 +3696,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
> enqueue_load_avg(cfs_rq, se);
> if (se->on_rq) {
> update_load_add(&cfs_rq->load, se->load.weight);
> - if (cfs_rq->curr != se)
> - avg_vruntime_add(cfs_rq, se);
> + avg_vruntime_add(cfs_rq, se);
> }
> }
>
> @@ -5023,7 +5006,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> * EEVDF: placement strategy #1 / #2
> */
> if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
> - struct sched_entity *curr = cfs_rq->curr;
> unsigned long load;
>
> lag = se->vlag;
> @@ -5081,8 +5063,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> * vl_i = (W + w_i)*vl'_i / W
> */
> load = cfs_rq->avg_load;
> - if (curr && curr->on_rq)
> - load += scale_load_down(curr->load.weight);
>
> lag *= load + scale_load_down(se->load.weight);
> if (WARN_ON_ONCE(!load))
> @@ -5229,7 +5209,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
>
> update_entity_lag(cfs_rq, se);
> if (se != cfs_rq->curr)
> - __dequeue_entity(cfs_rq, se);
> + __dequeue_entity(cfs_rq, se, 0);
> se->on_rq = 0;
> account_entity_dequeue(cfs_rq, se);
>
> @@ -5264,7 +5244,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
> * runqueue.
> */
> update_stats_wait_end_fair(cfs_rq, se);
> - __dequeue_entity(cfs_rq, se);
> + __dequeue_entity(cfs_rq, se, 1);
> update_load_avg(cfs_rq, se, UPDATE_TG);
> /*
> * HACK, stash a copy of deadline at the point of pick in vlag,