Message-Id: <1476695653-12309-2-git-send-email-vincent.guittot@linaro.org>
Date: Mon, 17 Oct 2016 11:14:08 +0200
From: Vincent Guittot <vincent.guittot@...aro.org>
To: peterz@...radead.org, mingo@...nel.org,
linux-kernel@...r.kernel.org, dietmar.eggemann@....com
Cc: yuyang.du@...el.com, Morten.Rasmussen@....com,
linaro-kernel@...ts.linaro.org, pjt@...gle.com, bsegall@...gle.com,
kernellwp@...il.com, Vincent Guittot <vincent.guittot@...aro.org>
Subject: [PATCH 1/6 v5] sched: factorize attach entity

Factorize post_init_entity_util_avg() and the load-tracking part of
attach_task_cfs_rq() into one function, attach_entity_cfs_rq().
Signed-off-by: Vincent Guittot <vincent.guittot@...aro.org>
---
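A minimal userspace sketch of the factorization this patch applies, for
readers following along outside fair.c: the struct layouts, the
simplified signatures and the main() driver below are illustrative
stand-ins (only the function names come from the diff), not the kernel
implementation.

/* Sketch only: stub types standing in for the kernel's cfs_rq and
 * sched_entity; signatures simplified (no clock argument, no flags).
 */
#include <stdio.h>

struct cfs_rq       { unsigned long load_avg; };
struct sched_entity { struct cfs_rq *cfs_rq; unsigned long load; };

static struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

static void update_cfs_rq_load_avg(struct cfs_rq *cfs_rq)
{
	/* stand-in for refreshing the runqueue-wide load average */
	printf("update_cfs_rq_load_avg: load_avg=%lu\n", cfs_rq->load_avg);
}

static void attach_entity_load_avg(struct cfs_rq *cfs_rq,
				   struct sched_entity *se)
{
	/* stand-in for folding the entity's load into the runqueue */
	cfs_rq->load_avg += se->load;
}

static void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
	/* stand-in for propagating the change to the task group */
	printf("update_tg_load_avg: load_avg=%lu\n", cfs_rq->load_avg);
}

/*
 * The factorized helper: the update/attach/update-tg sequence that
 * post_init_entity_util_avg() and attach_task_cfs_rq() used to carry
 * separately now lives in exactly one place.
 */
static void attach_entity_cfs_rq(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	update_cfs_rq_load_avg(cfs_rq);
	attach_entity_load_avg(cfs_rq, se);
	update_tg_load_avg(cfs_rq);
}

int main(void)
{
	struct cfs_rq rq = { .load_avg = 0 };
	struct sched_entity se = { .cfs_rq = &rq, .load = 1024 };

	/* both former call sites reduce to this single call */
	attach_entity_cfs_rq(&se);
	return 0;
}

With the helper in place, post_init_entity_util_avg() and the attach
path of attach_task_cfs_rq() both collapse to a single
attach_entity_cfs_rq() call, which is what the hunks below do.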
kernel/sched/fair.c | 24 +++++++++++++-----------
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 543b2f2..584b86f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -701,9 +701,7 @@ void init_entity_runnable_average(struct sched_entity *se)
}
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
-static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
-static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
+static void attach_entity_cfs_rq(struct sched_entity *se);
/*
* With new tasks being created, their initial util_avgs are extrapolated
@@ -735,7 +733,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct sched_avg *sa = &se->avg;
long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
- u64 now = cfs_rq_clock_task(cfs_rq);
if (cap > 0) {
if (cfs_rq->avg.util_avg != 0) {
@@ -763,14 +760,12 @@ void post_init_entity_util_avg(struct sched_entity *se)
* such that the next switched_to_fair() has the
* expected state.
*/
- se->avg.last_update_time = now;
+ se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
return;
}
}
- update_cfs_rq_load_avg(now, cfs_rq, false);
- attach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq, false);
+ attach_entity_cfs_rq(se);
}
#else /* !CONFIG_SMP */
@@ -8697,9 +8692,8 @@ static void detach_task_cfs_rq(struct task_struct *p)
update_tg_load_avg(cfs_rq, false);
}
-static void attach_task_cfs_rq(struct task_struct *p)
+static void attach_entity_cfs_rq(struct sched_entity *se)
{
- struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 now = cfs_rq_clock_task(cfs_rq);
@@ -8711,10 +8705,18 @@ static void attach_task_cfs_rq(struct task_struct *p)
se->depth = se->parent ? se->parent->depth + 1 : 0;
#endif
- /* Synchronize task with its cfs_rq */
+ /* Synchronize entity with its cfs_rq */
update_cfs_rq_load_avg(now, cfs_rq, false);
attach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, false);
+}
+
+static void attach_task_cfs_rq(struct task_struct *p)
+{
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ attach_entity_cfs_rq(se);
if (!vruntime_normalized(p))
se->vruntime += cfs_rq->min_vruntime;
--
2.7.4