Message-Id: <20170903201542.2929-2-joelaf@google.com>
Date: Sun, 3 Sep 2017 13:15:41 -0700
From: Joel Fernandes <joelaf@...gle.com>
To: linux-kernel@...r.kernel.org
Cc: Joel Fernandes <joelaf@...gle.com>,
Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>,
Len Brown <lenb@...nel.org>,
"Rafael J . Wysocki" <rjw@...ysocki.net>,
Viresh Kumar <viresh.kumar@...aro.org>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@....com>,
Patrick Bellasi <patrick.bellasi@....com>,
Steve Muckle <smuckle@...gle.com>, kernel-team@...roid.com
Subject: [PATCH RFC v2 1/2] Revert "sched/fair: Drop always true parameter of update_cfs_rq_load_avg()"
This reverts commit 3a123bbbb10d54dbdde6ccbbd519c74c91ba2f52.
It is needed by the series to control whether cpufreq is notified about
updating the frequency during an update to the utilization.
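
For illustration only, and not part of the patch to apply: below is a
minimal, self-contained userspace sketch of the behaviour the restored
parameter provides. The struct layout, the stub bodies and main() are
simplified assumptions made for the sketch; only the
"update_freq && decayed" gate mirrors the real kernel code in the hunks
that follow.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the real struct cfs_rq from kernel/sched/sched.h. */
struct cfs_rq { int dummy; };

/* Stub for the kernel helper that ends up poking cpufreq. */
static void cfs_rq_util_change(struct cfs_rq *cfs_rq)
{
	(void)cfs_rq;
	printf("cpufreq notified\n");
}

/* Simplified: pretend the averages always decayed on this update. */
static int update_cfs_rq_load_avg(struct cfs_rq *cfs_rq, bool update_freq)
{
	int decayed = 1;

	/* The gate this revert restores: notify cpufreq only when asked. */
	if (update_freq && decayed)
		cfs_rq_util_change(cfs_rq);

	return decayed;
}

int main(void)
{
	struct cfs_rq rq = { 0 };

	update_cfs_rq_load_avg(&rq, true);	/* normal path: notified */
	update_cfs_rq_load_avg(&rq, false);	/* notification suppressed */
	return 0;
}

Note that in this patch every existing caller passes true, preserving
current behaviour; the flag exists so that a later patch in the series
can pass false on a path where the caller handles the cpufreq
notification itself.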
Cc: Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>
Cc: Len Brown <lenb@...nel.org>
Cc: Rafael J. Wysocki <rjw@...ysocki.net>
Cc: Viresh Kumar <viresh.kumar@...aro.org>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Juri Lelli <juri.lelli@....com>
Cc: Patrick Bellasi <patrick.bellasi@....com>
Cc: Steve Muckle <smuckle@...gle.com>
Cc: kernel-team@...roid.com
Signed-off-by: Joel Fernandes <joelaf@...gle.com>
---
kernel/sched/fair.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index eca6a57527f9..bf3595c0badf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -797,7 +797,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
/*
* For !fair tasks do:
*
- update_cfs_rq_load_avg(now, cfs_rq);
+ update_cfs_rq_load_avg(now, cfs_rq, false);
attach_entity_load_avg(cfs_rq, se);
switched_from_fair(rq, p);
*
@@ -3607,6 +3607,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum
* update_cfs_rq_load_avg - update the cfs_rq's load/util averages
* @now: current time, as per cfs_rq_clock_task()
* @cfs_rq: cfs_rq to update
+ * @update_freq: should we call cfs_rq_util_change() or will the call do so
*
* The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
* avg. The immediate corollary is that all (fair) tasks must be attached, see
@@ -3620,7 +3621,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum
* call update_tg_load_avg() when this function returns true.
*/
static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
{
unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0;
struct sched_avg *sa = &cfs_rq->avg;
@@ -3657,7 +3658,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
cfs_rq->load_last_update_time_copy = sa->last_update_time;
#endif
- if (decayed)
+ if (update_freq && decayed)
cfs_rq_util_change(cfs_rq);
return decayed;
@@ -3751,7 +3752,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
__update_load_avg_se(now, cpu, cfs_rq, se);
- decayed = update_cfs_rq_load_avg(now, cfs_rq);
+ decayed = update_cfs_rq_load_avg(now, cfs_rq, true);
decayed |= propagate_entity_load_avg(se);
if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
@@ -3841,7 +3842,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
#else /* CONFIG_SMP */
static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
{
return 0;
}
@@ -7331,7 +7332,7 @@ static void update_blocked_averages(int cpu)
if (throttled_hierarchy(cfs_rq))
continue;
- if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+ if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
update_tg_load_avg(cfs_rq, 0);
/* Propagate pending load changes to the parent, if any: */
@@ -7404,7 +7405,7 @@ static inline void update_blocked_averages(int cpu)
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
- update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+ update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
rq_unlock_irqrestore(rq, &rf);
}
--
2.14.1.581.gf28d330327-goog
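For completeness, a second illustrative sketch of the caller contract
documented in the kernel-doc above ("call update_tg_load_avg() when this
function returns true"). Everything here is a simplified userspace
assumption except the two function names, which are taken from the hunks
above:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in; the decay state is precomputed for the sketch. */
struct cfs_rq { bool decayed; };

static int update_cfs_rq_load_avg(struct cfs_rq *cfs_rq, bool update_freq)
{
	(void)update_freq;
	return cfs_rq->decayed;
}

/* Stub for the task-group average refresh done by real callers. */
static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
{
	(void)cfs_rq;
	(void)force;
	printf("task-group load average refreshed\n");
}

int main(void)
{
	struct cfs_rq rq = { .decayed = true };

	/* Mirrors the update_blocked_averages() hunk above. */
	if (update_cfs_rq_load_avg(&rq, true))
		update_tg_load_avg(&rq, 0);
	return 0;
}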