Message-Id: <20171028095941.4773-2-joelaf@google.com>
Date:   Sat, 28 Oct 2017 02:59:37 -0700
From:   Joel Fernandes <joelaf@...gle.com>
To:     linux-kernel@...r.kernel.org
Cc:     Joel Fernandes <joelaf@...gle.com>,
        "Rafael J . Wysocki" <rjw@...ysocki.net>,
        Viresh Kumar <viresh.kumar@...aro.org>,
        Ingo Molnar <mingo@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>,
        "Cc: Srinivas Pandruvada" <srinivas.pandruvada@...ux.intel.com>,
        "Cc: Len Brown" <lenb@...nel.org>,
        "Cc: Juri Lelli" <juri.lelli@....com>,
        "Cc: Patrick Bellasi" <patrick.bellasi@....com>,
        "Cc: Steve Muckle" <smuckle@...gle.com>,
        "Cc: Brendan Jackman" <brendan.jackman@....com>,
        "Cc: Chris Redpath" <Chris.Redpath@....com>,
        "Cc: Atish Patra" <atish.patra@...cle.com>,
        "Cc: Dietmar Eggemann" <dietmar.eggemann@....com>,
        "Cc: Vincent Guittot" <vincent.guittot@...aro.org>,
        "Cc: Morten Ramussen" <morten.rasmussen@....com>,
        "Cc: Frederic Weisbecker" <fweisbec@...il.com>,
        "Cc: Thomas Gleixner" <tglx@...utronix.de>,
        "Cc: EAS Dev" <eas-dev@...ts.linaro.org>,
        "Cc: Android Kernel" <kernel-team@...roid.com>
Subject: [PATCH RFC 1/5] Revert "sched/fair: Drop always true parameter of update_cfs_rq_load_avg()"

This reverts commit 3a123bbbb10d54dbdde6ccbbd519c74c91ba2f52.

This revert is needed so that callers can control whether cpufreq is
notified about a frequency update when the utilization is updated.
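For illustration only (not part of this patch), a hypothetical caller
could then decay the cfs_rq averages without triggering a cpufreq
notification by passing false for the restored update_freq parameter:

	/* decay the averages, but skip the cfs_rq_util_change() call */
	decayed = update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, false);
	if (decayed)
		update_tg_load_avg(cfs_rq, 0);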

Cc: Rafael J. Wysocki <rjw@...ysocki.net>
Cc: Viresh Kumar <viresh.kumar@...aro.org>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Signed-off-by: Joel Fernandes <joelaf@...gle.com>
---
 kernel/sched/fair.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 56f343b8e749..f97693fe8b6e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -784,7 +784,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
 			/*
 			 * For !fair tasks do:
 			 *
-			update_cfs_rq_load_avg(now, cfs_rq);
+			update_cfs_rq_load_avg(now, cfs_rq, false);
 			attach_entity_load_avg(cfs_rq, se);
 			switched_from_fair(rq, p);
 			 *
@@ -3596,6 +3596,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum
  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
  * @now: current time, as per cfs_rq_clock_task()
  * @cfs_rq: cfs_rq to update
+ * @update_freq: should we call cfs_rq_util_change() or will the call do so
  *
  * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
  * avg. The immediate corollary is that all (fair) tasks must be attached, see
@@ -3609,7 +3610,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum
  * call update_tg_load_avg() when this function returns true.
  */
 static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 {
 	unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0;
 	struct sched_avg *sa = &cfs_rq->avg;
@@ -3646,7 +3647,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-	if (decayed)
+	if (update_freq && decayed)
 		cfs_rq_util_change(cfs_rq);
 
 	return decayed;
@@ -3740,7 +3741,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
 		__update_load_avg_se(now, cpu, cfs_rq, se);
 
-	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
+	decayed  = update_cfs_rq_load_avg(now, cfs_rq, true);
 	decayed |= propagate_entity_load_avg(se);
 
 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
@@ -3830,7 +3831,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
 #else /* CONFIG_SMP */
 
 static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 {
 	return 0;
 }
@@ -7318,7 +7319,7 @@ static void update_blocked_averages(int cpu)
 		if (throttled_hierarchy(cfs_rq))
 			continue;
 
-		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
 			update_tg_load_avg(cfs_rq, 0);
 
 		/* Propagate pending load changes to the parent, if any: */
@@ -7391,7 +7392,7 @@ static inline void update_blocked_averages(int cpu)
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
 	rq_unlock_irqrestore(rq, &rf);
 }
 
-- 
2.15.0.rc2.357.g7e34df9404-goog
