Message-Id: <20240727105030.514088302@infradead.org>
Date: Sat, 27 Jul 2024 12:27:51 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: mingo@...hat.com,
peterz@...radead.org,
juri.lelli@...hat.com,
vincent.guittot@...aro.org,
dietmar.eggemann@....com,
rostedt@...dmis.org,
bsegall@...gle.com,
mgorman@...e.de,
vschneid@...hat.com,
linux-kernel@...r.kernel.org
Cc: kprateek.nayak@....com,
wuyun.abel@...edance.com,
youssefesmat@...omium.org,
tglx@...utronix.de,
efault@....de
Subject: [PATCH 19/24] sched/eevdf: Fixup PELT vs DELAYED_DEQUEUE

Note that tasks that are kept on the runqueue to burn off negative lag
are not in fact runnable anymore; they'll get dequeued the moment they
get picked.

As such, don't count this time towards runnable.
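
As a rough illustration of the intended semantics, here is a minimal
standalone userspace model (not kernel code; the struct and helper
below only mimic sched_entity::sched_delayed and the patched
se_runnable()):

#include <stdbool.h>
#include <stdio.h>

/* Standalone model of the relevant sched_entity state. */
struct entity {
	bool on_rq;         /* still enqueued on the runqueue */
	bool sched_delayed; /* kept enqueued only to burn off negative lag */
};

/*
 * Mimics the patched se_runnable(): a delayed entity still sits on
 * the runqueue, but must not contribute to the runnable PELT signal.
 */
static long entity_runnable(const struct entity *se)
{
	if (se->sched_delayed)
		return 0;
	return se->on_rq;
}

int main(void)
{
	struct entity se = { .on_rq = true, .sched_delayed = false };
	printf("enqueued: runnable=%ld\n", entity_runnable(&se)); /* 1 */

	/* Delayed dequeue: still on_rq, but no longer runnable. */
	se.sched_delayed = true;
	printf("delayed:  runnable=%ld\n", entity_runnable(&se)); /* 0 */
	return 0;
}

The update_load_avg() calls added below pair each flip of
se->sched_delayed with a PELT update, so the runnable average stops
(or resumes) accruing at the moment the flag changes.
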
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
kernel/sched/fair.c | 2 ++
kernel/sched/sched.h | 6 ++++++
2 files changed, 8 insertions(+)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5388,6 +5388,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
if (cfs_rq->next == se)
cfs_rq->next = NULL;
se->sched_delayed = 1;
+ update_load_avg(cfs_rq, se, 0);
return false;
}
}
@@ -6814,6 +6815,7 @@ requeue_delayed_entity(struct sched_enti
}
se->sched_delayed = 0;
+ update_load_avg(cfs_rq, se, 0);
}
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -816,6 +816,9 @@ static inline void se_update_runnable(st
static inline long se_runnable(struct sched_entity *se)
{
+ if (se->sched_delayed)
+ return false;
+
if (entity_is_task(se))
return !!se->on_rq;
else
@@ -830,6 +833,9 @@ static inline void se_update_runnable(st
static inline long se_runnable(struct sched_entity *se)
{
+ if (se->sched_delayed)
+ return false;
+
return !!se->on_rq;
}