Message-ID: <0575c014-6fe7-4118-bae8-cbb5b303a390@arm.com>
Date: Mon, 22 Jul 2024 08:34:02 +0200
From: Dietmar Eggemann <dietmar.eggemann@....com>
To: Chuyi Zhou <zhouchuyi@...edance.com>, mingo@...hat.com,
 peterz@...radead.org, juri.lelli@...hat.com, vincent.guittot@...aro.org,
 rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de, vschneid@...hat.com
Cc: chengming.zhou@...ux.dev, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2] sched/fair: Sync se's load_avg with cfs_rq in
 reweight_task

On 20/07/2024 07:12, Chuyi Zhou wrote:
> In reweight_task(), there are two situations:
> 
> 1. The task was on_rq; its load_avg is then accurate because we
> synchronized it with the cfs_rq through update_load_avg() in
> dequeue_task().
> 
> 2. The task is sleeping; its load_avg might not have been updated for
> some time, which can result in an inaccurate dequeue_load_avg() in
> reweight_entity().
> 
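(Aside: a toy userspace illustration of how stale this can get. PELT
decays a blocked entity's load by a factor y per 1024us period, with
y^32 == 1/2, so the contribution halves roughly every 32ms; the initial
load_avg of 1024 and the 100ms sleep below are made-up example numbers.)

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);		/* per-period decay */
	const double stale = 1024.0;			/* load_avg when the task slept */
	const double periods = 100.0 * 1000.0 / 1024.0;	/* 100ms asleep */
	const double fresh = stale * pow(y, periods);

	/* Prints stale=1024 decayed=123: the un-synced value
	 * overstates the task's load by roughly 8x. */
	printf("stale=%.0f decayed=%.0f\n", stale, fresh);
	return 0;
}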
> This patch solves this by using update_load_avg() to synchronize the
> se's load_avg with its cfs_rq. For tasks that were on_rq, load_avg was
> already updated to an accurate value in dequeue_task(), so this change
> has no further effect given the short interval between the two updates.
> 
> Signed-off-by: Chuyi Zhou <zhouchuyi@...edance.com>
> ---
> Changes in v2:
> - Change the description in the commit log.
> - Use update_load_avg() in reweight_task() rather than in
>   reweight_entity(), as suggested by Chengming.
> - Link to v1: https://lore.kernel.org/lkml/20240716150840.23061-1-zhouchuyi@bytedance.com/
> ---
>  kernel/sched/fair.c | 3 +++
>  1 file changed, 3 insertions(+)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 9057584ec06d..b1e07ce90284 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -3835,12 +3835,15 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
>  	}
>  }
>  
> +static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags);
> +
>  void reweight_task(struct task_struct *p, const struct load_weight *lw)
>  {
>  	struct sched_entity *se = &p->se;
>  	struct cfs_rq *cfs_rq = cfs_rq_of(se);
>  	struct load_weight *load = &se->load;
>  
> +	update_load_avg(cfs_rq, se, 0);

IIUC, you only want to sync the sleeping task with its cfs_rq. IMHO,
sync_entity_load_avg() should be used here instead of update_load_avg();
the latter does much more than that.
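
Roughly, from memory of mainline fair.c (paraphrased call structure,
not the exact body):

  update_load_avg(cfs_rq, se, flags)
    -> __update_load_avg_se(now, cfs_rq, se)   /* age the se */
    -> update_cfs_rq_load_avg(now, cfs_rq)     /* age the whole cfs_rq */
    -> propagate_entity_load_avg(se)           /* group hierarchy */
    -> and, depending on flags/decay: attach_entity_load_avg(),
       update_tg_load_avg(), cfs_rq_util_change()

whereas sync_entity_load_avg() only runs __update_load_avg_blocked_se()
against the cfs_rq's last_update_time (see its body in the diff below).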

>  	reweight_entity(cfs_rq, se, lw->weight);
>  	load->inv_weight = lw->inv_weight;
>  }

Maybe even do this in reweight_entity()? You would have to do it under
'if (!se->on_rq)' in reweight_task() anyway, I assume.
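
Something like this (sketch only, untested; assumes
sync_entity_load_avg() has been moved up so it is visible here, as in
the diff below):

void reweight_task(struct task_struct *p, const struct load_weight *lw)
{
	struct sched_entity *se = &p->se;

	/* Only a sleeping se needs its PELT sums caught up here. */
	if (!se->on_rq)
		sync_entity_load_avg(se);

	reweight_entity(cfs_rq_of(se), se, lw->weight);
	se->load.inv_weight = lw->inv_weight;
}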

-->8--

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9057584ec06d..555392be4e82 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3669,11 +3669,31 @@ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
        cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
                                          cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
 }
+
+static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
+{
+       return u64_u32_load_copy(cfs_rq->avg.last_update_time,
+                                cfs_rq->last_update_time_copy);
+}
+
+/*
+ * Synchronize entity load avg of dequeued entity without locking
+ * the previous rq.
+ */
+static void sync_entity_load_avg(struct sched_entity *se)
+{
+       struct cfs_rq *cfs_rq = cfs_rq_of(se);
+       u64 last_update_time;
+
+       last_update_time = cfs_rq_last_update_time(cfs_rq);
+       __update_load_avg_blocked_se(last_update_time, se);
+}
 #else
 static inline void
 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
 static inline void
 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+static inline void sync_entity_load_avg(struct sched_entity *se) { }
 #endif
 
 static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
@@ -3795,7 +3815,10 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                if (!curr)
                        __dequeue_entity(cfs_rq, se);
                update_load_sub(&cfs_rq->load, se->load.weight);
+       } else if (entity_is_task(se)) {
+               sync_entity_load_avg(se);
        }
+
        dequeue_load_avg(cfs_rq, se);
 
        if (se->on_rq) {
@@ -4033,12 +4056,6 @@ static inline bool load_avg_is_decayed(struct sched_avg *sa)
 
        return true;
 }
-
-static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
-{
-       return u64_u32_load_copy(cfs_rq->avg.last_update_time,
-                                cfs_rq->last_update_time_copy);
-}
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
@@ -4773,19 +4790,6 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
        }
 }
 
-/*
- * Synchronize entity load avg of dequeued entity without locking
- * the previous rq.
- */
-static void sync_entity_load_avg(struct sched_entity *se)
-{
-       struct cfs_rq *cfs_rq = cfs_rq_of(se);
-       u64 last_update_time;
-
-       last_update_time = cfs_rq_last_update_time(cfs_rq);
-       __update_load_avg_blocked_se(last_update_time, se);
-}
-
 /*
  * Task first catches up with cfs_rq, and then subtract
  * itself from the cfs_rq (task must be off the queue now).

