Message-ID: <20160621114335.GQ30909@twins.programming.kicks-ass.net>
Date: Tue, 21 Jun 2016 13:43:35 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Vincent Guittot <vincent.guittot@...aro.org>
Cc: Yuyang Du <yuyang.du@...el.com>, Ingo Molnar <mingo@...nel.org>,
linux-kernel <linux-kernel@...r.kernel.org>,
Mike Galbraith <umgwanakikbuti@...il.com>,
Benjamin Segall <bsegall@...gle.com>,
Paul Turner <pjt@...gle.com>,
Morten Rasmussen <morten.rasmussen@....com>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Matt Fleming <matt@...eblueprint.co.uk>
Subject: Re: [PATCH 4/4] sched,fair: Fix PELT integrity for new tasks
On Mon, Jun 20, 2016 at 11:23:39AM +0200, Vincent Guittot wrote:
> Don't we have to do a complete attach with attach_task_cfs_rq instead
> of just the load_avg? To also set the depth?
Hmm, yes, your sched_set_group() change seems to have munged this.
Previously we'd call task_move_group_fair(), which would indeed set up
the depth.
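(To spell out what "set up the depth" means: se->depth is the distance to
the root of the se->parent chain, and find_matching_se() relies on it to
walk two entities up to a common ancestor. A sketch of the invariant --
this helper doesn't exist in the tree, it's illustration only:

static int se_depth_of(struct sched_entity *se)
{
	int depth = 0;

	/* count ancestors up the group-entity hierarchy */
	while ((se = se->parent))
		depth++;

	return depth;
}

If the fork path bypasses the fair-class hook entirely, nothing recomputes
se->depth for the group the new task lands in.)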
I've changed it thus (all smaller edits just didn't look right):
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7743,7 +7743,7 @@ void sched_offline_group(struct task_gro
  * group pointers. The task will be attached to the runqueue during its wake
  * up.
  */
-static void sched_set_group(struct task_struct *tsk, bool move)
+static void sched_change_group(struct task_struct *tsk, int type)
 {
 	struct task_group *tg;
 
@@ -7758,8 +7758,8 @@ static void sched_set_group(struct task_
 	tsk->sched_task_group = tg;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	if (move && tsk->sched_class->task_move_group)
-		tsk->sched_class->task_move_group(tsk);
+	if (tsk->sched_class->task_change_group)
+		tsk->sched_class->task_change_group(tsk, type);
 	else
 #endif
 		set_task_rq(tsk, task_cpu(tsk));
@@ -7788,7 +7788,7 @@ void sched_move_task(struct task_struct
 	if (unlikely(running))
 		put_prev_task(rq, tsk);
 
-	sched_set_group(tsk, true);
+	sched_change_group(tsk, TASK_MOVE_GROUP);
 
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
@@ -8227,7 +8227,7 @@ static void cpu_cgroup_fork(struct task_
 
 	rq = task_rq_lock(task, &rf);
 
-	sched_set_group(task, false);
+	sched_change_group(task, TASK_SET_GROUP);
 
 	task_rq_unlock(rq, task, &rf);
 }
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8443,6 +8443,14 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+static void task_set_group_fair(struct task_struct *p)
+{
+	struct sched_entity *se = &p->se;
+
+	set_task_rq(p, task_cpu(p));
+	se->depth = se->parent ? se->parent->depth + 1 : 0;
+}
+
 static void task_move_group_fair(struct task_struct *p)
 {
 	detach_task_cfs_rq(p);
@@ -8455,6 +8463,19 @@ static void task_move_group_fair(struct
 	attach_task_cfs_rq(p);
 }
 
+static void task_change_group_fair(struct task_struct *p, int type)
+{
+	switch (type) {
+	case TASK_SET_GROUP:
+		task_set_group_fair(p);
+		break;
+
+	case TASK_MOVE_GROUP:
+		task_move_group_fair(p);
+		break;
+	}
+}
+
 void free_fair_sched_group(struct task_group *tg)
 {
 	int i;
@@ -8683,7 +8704,7 @@ const struct sched_class fair_sched_clas
 	.update_curr		= update_curr_fair,
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	.task_move_group	= task_move_group_fair,
+	.task_change_group	= task_change_group_fair,
 #endif
 };
 
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1247,7 +1247,10 @@ struct sched_class {
 	void (*update_curr) (struct rq *rq);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*task_move_group) (struct task_struct *p);
+#define TASK_SET_GROUP	0
+#define TASK_MOVE_GROUP	1
+
+	void (*task_change_group) (struct task_struct *p, int type);
 #endif
 };
 
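The reason fork wants the "set" variant rather than a full move: at that
point the child's load_avg has never been attached to any cfs_rq, so the
detach/attach pair in task_move_group_fair() would be wrong there; the
first attach happens at wakeup, per the comment above sched_change_group().
Roughly, the two call sites (a sketch of the code above, nothing new):

	/* fork: only re-point the task and fix up se->depth; the
	 * child's load_avg gets attached at its first wakeup. */
	sched_change_group(task, TASK_SET_GROUP);

	/* cgroup migration: attached load must follow the task, so
	 * detach from the old cfs_rq and attach to the new one. */
	sched_change_group(tsk, TASK_MOVE_GROUP);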