Message-ID: <20260121162507.525037175@infradead.org>
Date: Wed, 21 Jan 2026 17:20:11 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: tglx@...utronix.de
Cc: arnd@...db.de,
anna-maria@...utronix.de,
frederic@...nel.org,
peterz@...radead.org,
luto@...nel.org,
mingo@...hat.com,
juri.lelli@...hat.com,
vincent.guittot@...aro.org,
dietmar.eggemann@....com,
rostedt@...dmis.org,
bsegall@...gle.com,
mgorman@...e.de,
vschneid@...hat.com,
linux-kernel@...r.kernel.org,
oliver.sang@...el.com
Subject: [PATCH v2 1/6] sched/eevdf: Fix HRTICK duration

The nominal duration for an EEVDF task to run is until its deadline,
at which point the deadline is moved ahead and a new task selection
is made. Program the hrtick for the wall-clock time until the virtual
deadline, instead of for the remainder of the slice.

Try to predict the time 'lost' to higher scheduling classes. Since
this is an estimate, the timer can be either early or late. If it is
early, task_tick_fair() will take the !need_resched() path and
restart the timer.
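
To illustrate the arithmetic (a standalone sketch, not the kernel
code; the names vdelta, weight and util mirror those used in
hrtick_start_fair() below, NICE_0_LOAD is simplified to 1024, and
everything else is made-up scaffolding):

	/* Sketch of the new hrtick duration computation. */
	#include <stdio.h>
	#include <stdint.h>

	#define NICE_0_LOAD 1024ULL	/* nice-0 load weight, simplified */

	static uint64_t hrtick_duration(uint64_t weight, uint64_t vdelta,
					uint64_t util)
	{
		/* Convert virtual time until deadline into wall-clock time. */
		uint64_t delta = (weight * vdelta) / NICE_0_LOAD;
		uint64_t scale = 1024;

		/* Stretch the timer by the CPU fraction lost to DL/RT/IRQ. */
		if (util && util < 1024) {
			scale *= 1024;
			scale /= (1024 - util);
		}

		return (scale * delta) / 1024;
	}

	int main(void)
	{
		/*
		 * A nice-0 task with 3ms of virtual time until its deadline,
		 * on a CPU where higher classes consume half the capacity
		 * (util = 512), gets its timer stretched to ~6ms.
		 */
		printf("%llu ns\n",
		       (unsigned long long)hrtick_duration(1024, 3000000ULL, 512));
		return 0;
	}

With util = 0 the timer simply fires at the wall-clock deadline; as
util approaches 1024 the scaling blows up, which is why the correction
is only applied while util < 1024.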
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 kernel/sched/fair.c | 55 +++++++++++++++++++++++++++++-----------------------
 1 file changed, 31 insertions(+), 24 deletions(-)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5511,7 +5511,7 @@ static void put_prev_entity(struct cfs_r
 }
 
 static void
-entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
+entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
 	/*
 	 * Update run-time statistics of the 'current'.
@@ -5523,17 +5523,6 @@ entity_tick(struct cfs_rq *cfs_rq, struc
 	 */
 	update_load_avg(cfs_rq, curr, UPDATE_TG);
 	update_cfs_group(curr);
-
-#ifdef CONFIG_SCHED_HRTICK
-	/*
-	 * queued ticks are scheduled to match the slice, so don't bother
-	 * validating it and just reschedule.
-	 */
-	if (queued) {
-		resched_curr_lazy(rq_of(cfs_rq));
-		return;
-	}
-#endif
 }
@@ -6735,21 +6724,39 @@ static inline void sched_fair_update_sto
 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
 	struct sched_entity *se = &p->se;
+	unsigned long scale = 1024;
+	unsigned long util = 0;
+	u64 vdelta;
+	u64 delta;
 
 	WARN_ON_ONCE(task_rq(p) != rq);
 
-	if (rq->cfs.h_nr_queued > 1) {
-		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
-		u64 slice = se->slice;
-		s64 delta = slice - ran;
-
-		if (delta < 0) {
-			if (task_current_donor(rq, p))
-				resched_curr(rq);
-			return;
-		}
-		hrtick_start(rq, delta);
+	if (rq->cfs.h_nr_queued <= 1)
+		return;
+
+	/*
+	 * Compute time until virtual deadline
+	 */
+	vdelta = se->deadline - se->vruntime;
+	if ((s64)vdelta < 0) {
+		if (task_current_donor(rq, p))
+			resched_curr(rq);
+		return;
+	}
+	delta = (se->load.weight * vdelta) / NICE_0_LOAD;
+
+	/*
+	 * Correct for instantaneous load of other classes.
+	 */
+	util += cpu_util_dl(rq);
+	util += cpu_util_rt(rq);
+	util += cpu_util_irq(rq);
+	if (util && util < 1024) {
+		scale *= 1024;
+		scale /= (1024 - util);
 	}
+
+	hrtick_start(rq, (scale * delta) / 1024);
 }
 
 /*
@@ -13373,7 +13380,7 @@ static void task_tick_fair(struct rq *rq
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
-		entity_tick(cfs_rq, se, queued);
+		entity_tick(cfs_rq, se);
 	}
 
 	if (queued) {