Message-Id: <20220608042605.60720-1-zhouchengming@bytedance.com>
Date: Wed, 8 Jun 2022 12:26:05 +0800
From: Chengming Zhou <zhouchengming@...edance.com>
To: mingo@...hat.com, peterz@...radead.org, juri.lelli@...hat.com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
bristot@...hat.com, vschneid@...hat.com
Cc: linux-kernel@...r.kernel.org, duanxiongchun@...edance.com,
songmuchun@...edance.com,
Chengming Zhou <zhouchengming@...edance.com>
Subject: [PATCH] sched/fair: combine detach into dequeue when migrating task

When we are migrating a task off the CPU, we can combine the detach
into dequeue_entity() and save the separate detach_entity_cfs_rq()
call in migrate_task_rq_fair().

This optimization mirrors DO_ATTACH, which combines the attach into
enqueue_entity() when migrating a task to the CPU, so we don't have
to traverse the cfs_rq tree twice to do the load detach and
propagation.

Signed-off-by: Chengming Zhou <zhouchengming@...edance.com>
---
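
For readers who want the shape of the change without the surrounding
fair.c context, here is a minimal standalone sketch of the new flag
dispatch. Everything in it is a simplified stub invented for
illustration, not the kernel's actual types or helpers; only the flag
values and the branch structure follow the patch: DO_DETACH folds the
load detach into the dequeue pass when a task migrates off the CPU,
mirroring how DO_ATTACH folds the attach into the enqueue pass on
migrate-in.

#include <stdio.h>

#define UPDATE_TG		0x1
#define SKIP_AGE_LOAD		0x2
#define DO_ATTACH		0x4
#define DO_DETACH		0x8

#define TASK_ON_RQ_MIGRATING	2

/* Stub types standing in for the kernel's structures. */
struct sched_avg { unsigned long last_update_time; };
struct sched_entity { struct sched_avg avg; };
struct cfs_rq { long load_avg; };
struct task_struct { int on_rq; struct sched_entity se; };

static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	printf("detach se load from cfs_rq\n");
}

static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	printf("attach se load to cfs_rq\n");
}

static void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
	printf("propagate the change up the task group\n");
}

/* Simplified shape of update_load_avg() after the patch. */
static void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	if (flags & DO_DETACH) {
		/* Dequeue of a task that is migrating off this CPU. */
		detach_entity_load_avg(cfs_rq, se);
		update_tg_load_avg(cfs_rq);
	} else if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
		/* Enqueue of a task that just migrated to this CPU. */
		attach_entity_load_avg(cfs_rq, se);
		update_tg_load_avg(cfs_rq);
	}
}

/* Simplified shape of dequeue_entity(): the detach now rides along. */
static void dequeue_entity(struct cfs_rq *cfs_rq, struct task_struct *p)
{
	int action = UPDATE_TG;

	if (p->on_rq == TASK_ON_RQ_MIGRATING)
		action |= DO_DETACH;

	update_load_avg(cfs_rq, &p->se, action);
}

int main(void)
{
	struct cfs_rq rq = { 0 };
	struct task_struct p = { .on_rq = TASK_ON_RQ_MIGRATING };

	dequeue_entity(&rq, &p);			    /* migrate out */
	update_load_avg(&rq, &p.se, UPDATE_TG | DO_ATTACH); /* migrate in  */
	return 0;
}

As in the kernel, a zeroed se->avg.last_update_time is what lets the
enqueue side recognize a freshly migrated task: migrate_task_rq_fair()
clears it before the task is enqueued on the new CPU.
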
 kernel/sched/fair.c | 33 ++++++++++++++++++++++-----------
 1 file changed, 22 insertions(+), 11 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e0cd4052e32f..1db7c97a5fe6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3896,6 +3896,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #define UPDATE_TG	0x1
 #define SKIP_AGE_LOAD	0x2
 #define DO_ATTACH	0x4
+#define DO_DETACH	0x8
 
 /* Update task and its cfs_rq load average */
 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -3913,7 +3914,14 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
 	decayed |= propagate_entity_load_avg(se);
 
-	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
+	if (flags & DO_DETACH) {
+		/*
+		 * DO_DETACH means we're here from dequeue_entity()
+		 * and we are migrating the task off the CPU.
+		 */
+		detach_entity_load_avg(cfs_rq, se);
+		update_tg_load_avg(cfs_rq);
+	} else if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
 
 		/*
 		 * DO_ATTACH means we're here from enqueue_entity().
@@ -4426,6 +4434,14 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
+	int action = UPDATE_TG;
+
+	/*
+	 * If we are migrating a task off the CPU, detach load_avg when dequeuing.
+	 */
+	if (entity_is_task(se) && task_of(se)->on_rq == TASK_ON_RQ_MIGRATING)
+		action |= DO_DETACH;
+
 	/*
 	 * Update run-time statistics of the 'current'.
 	 */
@@ -4440,7 +4456,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * - For group entity, update its weight to reflect the new share
 	 *   of its group cfs_rq.
 	 */
-	update_load_avg(cfs_rq, se, UPDATE_TG);
+	update_load_avg(cfs_rq, se, action);
 	se_update_runnable(se);
 
 	update_stats_dequeue_fair(cfs_rq, se, flags);
@@ -6940,15 +6956,10 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
 		se->vruntime -= min_vruntime;
 	}
 
-	if (p->on_rq == TASK_ON_RQ_MIGRATING) {
-		/*
-		 * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
-		 * rq->lock and can modify state directly.
-		 */
-		lockdep_assert_rq_held(task_rq(p));
-		detach_entity_cfs_rq(&p->se);
-
-	} else {
+	/*
+	 * In case of TASK_ON_RQ_MIGRATING we already detached in dequeue_entity().
+	 */
+	if (p->on_rq != TASK_ON_RQ_MIGRATING) {
 		/*
 		 * We are supposed to update the task to "current" time, then
 		 * its up to date and ready to go to new CPU/cfs_rq. But we
--
2.36.1