Date:   Wed, 19 Jul 2023 21:29:14 +0800
From:   Aaron Lu <aaron.lu@...el.com>
To:     Vincent Guittot <vincent.guittot@...aro.org>
CC:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Juri Lelli <juri.lelli@...hat.com>,
        Daniel Jordan <daniel.m.jordan@...cle.com>,
        Dietmar Eggemann <dietmar.eggemann@....com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Ben Segall <bsegall@...gle.com>,
        "Mel Gorman" <mgorman@...e.de>,
        Daniel Bristot de Oliveira <bristot@...hat.com>,
        Valentin Schneider <vschneid@...hat.com>,
        Tim Chen <tim.c.chen@...el.com>,
        Nitin Tekchandani <nitin.tekchandani@...el.com>,
        Yu Chen <yu.c.chen@...el.com>,
        Waiman Long <longman@...hat.com>,
        <linux-kernel@...r.kernel.org>
Subject: Re: [RFC PATCH 3/4] sched/fair: delay update_tg_load_avg() for
 cfs_rq's removed load

On Wed, Jul 19, 2023 at 11:47:06AM +0200, Vincent Guittot wrote:
> On Wed, 19 Jul 2023 at 10:01, Aaron Lu <aaron.lu@...el.com> wrote:
> >
> > On Wed, Jul 19, 2023 at 01:18:26PM +0800, Aaron Lu wrote:
> > > Alternatively, I can remove some callsites of update_tg_load_avg() like
> > > you suggested below and only call update_tg_load_avg() when cfs_rq is
> > > decayed (really just decayed, not when it has removed load pending or
> > > load propagated from its children). This way it would give us a
> > > similar result as above (roughly once per ms).
> >
> > Something like this (I think this is better since it removes those
> > unnecessary calls to update_tg_load_avg(); it is inline, but still):
> >
> >
> > From bc749aaefa6bed36aa946921a4006b3dddb69b77 Mon Sep 17 00:00:00 2001
> > From: Aaron Lu <aaron.lu@...el.com>
> > Date: Wed, 19 Jul 2023 13:54:48 +0800
> > Subject: [PATCH] sched/fair: only update_tg_load_avg() when cfs_rq decayed
> >
> > ---
> >  kernel/sched/fair.c | 22 +++++++---------------
> >  1 file changed, 7 insertions(+), 15 deletions(-)
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index a80a73909dc2..7d5b7352b8b5 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -3913,16 +3913,16 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum
> >  }
> >
> >  /* Update task and its cfs_rq load average */
> > -static inline int propagate_entity_load_avg(struct sched_entity *se)
> > +static inline void propagate_entity_load_avg(struct sched_entity *se)
> >  {
> >         struct cfs_rq *cfs_rq, *gcfs_rq;
> >
> >         if (entity_is_task(se))
> > -               return 0;
> > +               return;
> >
> >         gcfs_rq = group_cfs_rq(se);
> >         if (!gcfs_rq->propagate)
> > -               return 0;
> > +               return;
> >
> >         gcfs_rq->propagate = 0;
> >
> > @@ -3936,8 +3936,6 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
> >
> >         trace_pelt_cfs_tp(cfs_rq);
> >         trace_pelt_se_tp(se);
> > -
> > -       return 1;
> >  }
> >
> >  /*
> > @@ -3974,9 +3972,8 @@ static inline bool skip_blocked_update(struct sched_entity *se)
> >
> >  static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
> >
> > -static inline int propagate_entity_load_avg(struct sched_entity *se)
> > +static inline void propagate_entity_load_avg(struct sched_entity *se)
> >  {
> > -       return 0;
> >  }
> >
> >  static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
> > @@ -4086,7 +4083,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
> >  {
> >         unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0;
> >         struct sched_avg *sa = &cfs_rq->avg;
> > -       int decayed = 0;
> > +       int decayed;
> >
> >         if (cfs_rq->removed.nr) {
> >                 unsigned long r;
> > @@ -4134,11 +4131,9 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
> >                  */
> >                 add_tg_cfs_propagate(cfs_rq,
> >                         -(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);
> > -
> > -               decayed = 1;
> >         }
> >
> > -       decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
> > +       decayed = __update_load_avg_cfs_rq(now, cfs_rq);
> >         u64_u32_store_copy(sa->last_update_time,
> >                            cfs_rq->last_update_time_copy,
> >                            sa->last_update_time);
> > @@ -4252,7 +4247,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
> >                 __update_load_avg_se(now, cfs_rq, se);
> >
> >         decayed  = update_cfs_rq_load_avg(now, cfs_rq);
> > -       decayed |= propagate_entity_load_avg(se);
> > +       propagate_entity_load_avg(se);
> 
> but then you also skip the call to cfs_rq_util_change().

Ah right, I missed that, thanks for catching this.

Updated to keep the return value of propagate_entity_load_avg() and use it
for cfs_rq_util_change():

From 09a649f8111cfca656b7b735da975ef607b00956 Mon Sep 17 00:00:00 2001
From: Aaron Lu <aaron.lu@...el.com>
Date: Wed, 19 Jul 2023 13:54:48 +0800
Subject: [PATCH] sched/fair: only update_tg_load_avg() when cfs_rq decayed

---
 kernel/sched/fair.c | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a80a73909dc2..8d4b9e0a19b6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4086,7 +4086,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
 	unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0;
 	struct sched_avg *sa = &cfs_rq->avg;
-	int decayed = 0;
+	int decayed;
 
 	if (cfs_rq->removed.nr) {
 		unsigned long r;
@@ -4134,11 +4134,9 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 		 */
 		add_tg_cfs_propagate(cfs_rq,
 			-(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);
-
-		decayed = 1;
 	}
 
-	decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
+	decayed = __update_load_avg_cfs_rq(now, cfs_rq);
 	u64_u32_store_copy(sa->last_update_time,
 			   cfs_rq->last_update_time_copy,
 			   sa->last_update_time);
@@ -4242,7 +4240,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
 	u64 now = cfs_rq_clock_pelt(cfs_rq);
-	int decayed;
+	int decayed, propagated;
 
 	/*
 	 * Track task load average for carrying it to new CPU after migrated, and
@@ -4252,7 +4250,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 		__update_load_avg_se(now, cfs_rq, se);
 
 	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
-	decayed |= propagate_entity_load_avg(se);
+	propagated = propagate_entity_load_avg(se);
 
 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
 
@@ -4264,19 +4262,16 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 		 * IOW we're enqueueing a task on a new CPU.
 		 */
 		attach_entity_load_avg(cfs_rq, se);
-		update_tg_load_avg(cfs_rq);
-
 	} else if (flags & DO_DETACH) {
 		/*
 		 * DO_DETACH means we're here from dequeue_entity()
 		 * and we are migrating task out of the CPU.
 		 */
 		detach_entity_load_avg(cfs_rq, se);
-		update_tg_load_avg(cfs_rq);
-	} else if (decayed) {
+	} else if (decayed || propagated) {
 		cfs_rq_util_change(cfs_rq, 0);
 
-		if (flags & UPDATE_TG)
+		if (decayed && (flags & UPDATE_TG))
 			update_tg_load_avg(cfs_rq);
 	}
 }
-- 
2.41.0
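
For clarity, the tail of update_load_avg() with the above applied would look
roughly like this (just a sketch of the merged result; the diff above is
authoritative). cfs_rq_util_change() still runs when either the cfs_rq
decayed or load was propagated, while update_tg_load_avg() is only called
when the cfs_rq actually decayed:

	decayed    = update_cfs_rq_load_avg(now, cfs_rq);
	propagated = propagate_entity_load_avg(se);

	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
		/* enqueueing a task on a new CPU */
		attach_entity_load_avg(cfs_rq, se);
	} else if (flags & DO_DETACH) {
		/* dequeue_entity(): migrating the task off this CPU */
		detach_entity_load_avg(cfs_rq, se);
	} else if (decayed || propagated) {
		/* utilization may have changed in either case */
		cfs_rq_util_change(cfs_rq, 0);

		/* tg load only needs refreshing when cfs_rq really decayed */
		if (decayed && (flags & UPDATE_TG))
			update_tg_load_avg(cfs_rq);
	}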


> >
> >         if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
> >
> > @@ -4264,15 +4259,12 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
> >                  * IOW we're enqueueing a task on a new CPU.
> >                  */
> >                 attach_entity_load_avg(cfs_rq, se);
> > -               update_tg_load_avg(cfs_rq);
> > -
> >         } else if (flags & DO_DETACH) {
> >                 /*
> >                  * DO_DETACH means we're here from dequeue_entity()
> >                  * and we are migrating task out of the CPU.
> >                  */
> >                 detach_entity_load_avg(cfs_rq, se);
> > -               update_tg_load_avg(cfs_rq);
> >         } else if (decayed) {
> >                 cfs_rq_util_change(cfs_rq, 0);
> >
> > --
> > 2.41.0
> >
