Message-ID: <148767900991.5212.16341443539923627997.stgit@localhost.localdomain>
Date: Tue, 21 Feb 2017 15:10:22 +0300
From: Kirill Tkhai <ktkhai@...tuozzo.com>
To: <peterz@...radead.org>, <mingo@...hat.com>,
<linux-kernel@...r.kernel.org>, <ktkhai@...tuozzo.com>
Subject: [PATCH 1/3] sched/fair: Set sched_entity::depth only when parent
has changed

Currently we set sched_entity::depth in two places, which confuses the
reader. A task's depth depends on its parent's depth and can change only
when the parent changes, so set it in set_task_rq(), where the parent is
assigned. (A cfs_rq's depth is set in init_tg_cfs_entry() and is static,
so this change does not touch it.)

Signed-off-by: Kirill Tkhai <ktkhai@...tuozzo.com>
---
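For illustration only, not part of the patch: a minimal standalone C
sketch of how an entity's depth follows its parent chain once the
assignment lives solely in set_task_rq(). The struct and helper below
are toy stand-ins for the kernel's sched_entity and set_task_rq(), not
the real definitions.

#include <stdio.h>

struct toy_entity {
	struct toy_entity *parent;
	int depth;
};

/* Mirrors the expression now used only in set_task_rq(). */
static void toy_set_depth(struct toy_entity *se)
{
	se->depth = se->parent ? se->parent->depth + 1 : 0;
}

int main(void)
{
	struct toy_entity root_tg  = { .parent = NULL };
	struct toy_entity child_tg = { .parent = &root_tg };
	struct toy_entity task_se  = { .parent = &child_tg };

	toy_set_depth(&root_tg);	/* top-level group entity -> depth 0 */
	toy_set_depth(&child_tg);	/* nested group entity    -> depth 1 */
	toy_set_depth(&task_se);	/* task in nested group   -> depth 2 */

	printf("%d %d %d\n", root_tg.depth, child_tg.depth, task_se.depth);
	return 0;
}

With the assumed nesting root_tg -> child_tg -> task_se this prints
"0 1 2", i.e. each entity's depth is derived from its parent at the
moment the parent pointer is set.
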
kernel/sched/fair.c | 11 -----------
kernel/sched/sched.h | 1 +
2 files changed, 1 insertion(+), 11 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e88b35ac157..33b46ca301f0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9102,14 +9102,6 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 {
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	/*
-	 * Since the real-depth could have been changed (only FAIR
-	 * class maintain depth value), reset depth properly.
-	 */
-	se->depth = se->parent ? se->parent->depth + 1 : 0;
-#endif
-
 	/* Synchronize entity with its cfs_rq */
 	update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
 	attach_entity_load_avg(cfs_rq, se);
@@ -9204,10 +9196,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void task_set_group_fair(struct task_struct *p)
 {
-	struct sched_entity *se = &p->se;
-
 	set_task_rq(p, task_cpu(p));
-	se->depth = se->parent ? se->parent->depth + 1 : 0;
 }
 
 static void task_move_group_fair(struct task_struct *p)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 207fc1f4c764..4a9313f0b5c8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1123,6 +1123,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
 	p->se.cfs_rq = tg->cfs_rq[cpu];
 	p->se.parent = tg->se[cpu];
+	p->se.depth = p->se.parent ? p->se.parent->depth + 1 : 0;
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED