Message-ID: <tip-a05e8c51ff097ff73ec2947631d9102283545f7c@git.kernel.org>
Date: Sun, 13 Sep 2015 03:58:40 -0700
From: tip-bot for Byungchul Park <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: tglx@...utronix.de, mingo@...nel.org, linux-kernel@...r.kernel.org,
peterz@...radead.org, hpa@...or.com, torvalds@...ux-foundation.org,
byungchul.park@....com, efault@....de
Subject: [tip:sched/core] sched/fair: Factor out the {at,de}taching of the per entity load {to,from} the runqueue
Commit-ID: a05e8c51ff097ff73ec2947631d9102283545f7c
Gitweb: http://git.kernel.org/tip/a05e8c51ff097ff73ec2947631d9102283545f7c
Author: Byungchul Park <byungchul.park@....com>
AuthorDate: Thu, 20 Aug 2015 20:21:56 +0900
Committer: Ingo Molnar <mingo@...nel.org>
CommitDate: Sun, 13 Sep 2015 09:52:45 +0200
sched/fair: Factor out the {at,de}taching of the per entity load {to,from} the runqueue
Currently we open-code the addition/subtraction of the per entity load
to/from the runqueue; factor this out into helper functions.
Signed-off-by: Byungchul Park <byungchul.park@....com>
[ Rewrote the changelog. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Mike Galbraith <efault@....de>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: yuyang.du@...el.com
Link: http://lkml.kernel.org/r/1440069720-27038-2-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
kernel/sched/fair.c | 77 ++++++++++++++++++++++++++---------------------------
1 file changed, 38 insertions(+), 39 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6e2e348..a72a71b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2664,8 +2664,8 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
- int decayed;
struct sched_avg *sa = &cfs_rq->avg;
+ int decayed;
if (atomic_long_read(&cfs_rq->removed_load_avg)) {
long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
@@ -2695,33 +2695,52 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
static inline void update_load_avg(struct sched_entity *se, int update_tg)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
- int cpu = cpu_of(rq_of(cfs_rq));
u64 now = cfs_rq_clock_task(cfs_rq);
+ int cpu = cpu_of(rq_of(cfs_rq));
/*
* Track task load average for carrying it to new CPU after migrated, and
* track group sched_entity load average for task_h_load calc in migration
*/
__update_load_avg(now, cpu, &se->avg,
- se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
+ se->on_rq * scale_load_down(se->load.weight),
+ cfs_rq->curr == se, NULL);
if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
update_tg_load_avg(cfs_rq, 0);
}
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ se->avg.last_update_time = cfs_rq->avg.last_update_time;
+ cfs_rq->avg.load_avg += se->avg.load_avg;
+ cfs_rq->avg.load_sum += se->avg.load_sum;
+ cfs_rq->avg.util_avg += se->avg.util_avg;
+ cfs_rq->avg.util_sum += se->avg.util_sum;
+}
+
+static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
+ &se->avg, se->on_rq * scale_load_down(se->load.weight),
+ cfs_rq->curr == se, NULL);
+
+ cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
+ cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
+ cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
+ cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+}
+
/* Add the load generated by se into cfs_rq's load average */
static inline void
enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct sched_avg *sa = &se->avg;
u64 now = cfs_rq_clock_task(cfs_rq);
- int migrated = 0, decayed;
+ int migrated, decayed;
- if (sa->last_update_time == 0) {
- sa->last_update_time = now;
- migrated = 1;
- }
- else {
+ migrated = !sa->last_update_time;
+ if (!migrated) {
__update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
se->on_rq * scale_load_down(se->load.weight),
cfs_rq->curr == se, NULL);
@@ -2732,12 +2751,8 @@ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq->runnable_load_avg += sa->load_avg;
cfs_rq->runnable_load_sum += sa->load_sum;
- if (migrated) {
- cfs_rq->avg.load_avg += sa->load_avg;
- cfs_rq->avg.load_sum += sa->load_sum;
- cfs_rq->avg.util_avg += sa->util_avg;
- cfs_rq->avg.util_sum += sa->util_sum;
- }
+ if (migrated)
+ attach_entity_load_avg(cfs_rq, se);
if (decayed || migrated)
update_tg_load_avg(cfs_rq, 0);
@@ -2752,7 +2767,7 @@ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq->runnable_load_avg =
max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
cfs_rq->runnable_load_sum =
- max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
+ max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
}
/*
@@ -2820,6 +2835,11 @@ static inline void
dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
static inline void remove_entity_load_avg(struct sched_entity *se) {}
+static inline void
+attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void
+detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+
static inline int idle_balance(struct rq *rq)
{
return 0;
@@ -7909,25 +7929,10 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
se->vruntime -= cfs_rq->min_vruntime;
}
-#ifdef CONFIG_SMP
/* Catch up with the cfs_rq and remove our load when we leave */
- __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq), &se->avg,
- se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
-
- cfs_rq->avg.load_avg =
- max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
- cfs_rq->avg.load_sum =
- max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
- cfs_rq->avg.util_avg =
- max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
- cfs_rq->avg.util_sum =
- max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
-#endif
+ detach_entity_load_avg(cfs_rq, se);
}
-/*
- * We switched to the sched_fair class.
- */
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
struct sched_entity *se = &p->se;
@@ -8040,14 +8045,8 @@ static void task_move_group_fair(struct task_struct *p, int queued)
cfs_rq = cfs_rq_of(se);
se->vruntime += cfs_rq->min_vruntime;
-#ifdef CONFIG_SMP
/* Virtually synchronize task with its new cfs_rq */
- p->se.avg.last_update_time = cfs_rq->avg.last_update_time;
- cfs_rq->avg.load_avg += p->se.avg.load_avg;
- cfs_rq->avg.load_sum += p->se.avg.load_sum;
- cfs_rq->avg.util_avg += p->se.avg.util_avg;
- cfs_rq->avg.util_sum += p->se.avg.util_sum;
-#endif
+ attach_entity_load_avg(cfs_rq, se);
}
}
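
For readers who want the pattern outside the diff context, here is a
minimal, standalone sketch of what the refactoring does: the open-coded
add/subtract of an entity's load/util contribution becomes an
attach/detach helper pair, with the detach side clamping at zero the way
the max_t() calls above do. All names below (struct avg, struct rq_avg,
attach_load, detach_load) are simplified stand-ins invented for this
illustration; they are not the kernel's types or API, and the sketch
omits the __update_load_avg() decay step the real detach performs first.

/*
 * Standalone sketch (not kernel code): a simplified model of the
 * attach/detach helpers introduced by this patch.
 */
#include <stdio.h>

struct avg {
	long      load_avg;
	long long load_sum;
	long      util_avg;
	int       util_sum;
};

struct rq_avg {
	struct avg avg;
};

/* Add the entity's contribution to the runqueue-wide average. */
static void attach_load(struct rq_avg *rq, const struct avg *se)
{
	rq->avg.load_avg += se->load_avg;
	rq->avg.load_sum += se->load_sum;
	rq->avg.util_avg += se->util_avg;
	rq->avg.util_sum += se->util_sum;
}

/* Remove it again, never letting the runqueue sums go negative. */
static void detach_load(struct rq_avg *rq, const struct avg *se)
{
	rq->avg.load_avg = rq->avg.load_avg > se->load_avg ?
			   rq->avg.load_avg - se->load_avg : 0;
	rq->avg.load_sum = rq->avg.load_sum > se->load_sum ?
			   rq->avg.load_sum - se->load_sum : 0;
	rq->avg.util_avg = rq->avg.util_avg > se->util_avg ?
			   rq->avg.util_avg - se->util_avg : 0;
	rq->avg.util_sum = rq->avg.util_sum > se->util_sum ?
			   rq->avg.util_sum - se->util_sum : 0;
}

int main(void)
{
	struct rq_avg rq = { .avg = { 100, 1000, 50, 500 } };
	struct avg    se = { 30, 300, 10, 100 };

	attach_load(&rq, &se);	/* e.g. a task migrates in */
	detach_load(&rq, &se);	/* e.g. it leaves the fair class */
	printf("load_avg=%ld util_avg=%ld\n",
	       rq.avg.load_avg, rq.avg.util_avg);
	return 0;
}

The point of the patch is simply that the three call sites in the diff
(enqueue_entity_load_avg(), switched_from_fair(), task_move_group_fair())
now share these two helpers instead of repeating the arithmetic.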
--