Message-ID: <20230306141502.691294694@infradead.org>
Date: Mon, 06 Mar 2023 14:25:29 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: mingo@...nel.org, vincent.guittot@...aro.org
Cc: linux-kernel@...r.kernel.org, peterz@...radead.org,
juri.lelli@...hat.com, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
bristot@...hat.com, corbet@....net, qyousef@...alina.io,
chris.hyser@...cle.com, patrick.bellasi@...bug.net, pjt@...gle.com,
pavel@....cz, qperret@...gle.com, tim.c.chen@...ux.intel.com,
joshdon@...gle.com, timj@....org, kprateek.nayak@....com,
yu.c.chen@...el.com, youssefesmat@...omium.org,
joel@...lfernandes.org
Subject: [PATCH 08/10] sched/fair: Add lag based placement

With the introduction of avg_vruntime() it is possible to approximate
lag (the entire purpose of introducing it, in fact). Use this to do
lag-based placement over sleep+wake.

Specifically, the FAIR_SLEEPERS heuristic places woken entities too far
to the left, which messes up the deadline aspect of EEVDF.
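
To sketch the intended semantics in isolation (a minimal userspace
sketch, not kernel code; struct entity, record_lag() and place() are
illustrative stand-ins for the real sched_entity fields and helpers,
and the arithmetic relies on the same wrap-safe u64 conventions as the
real vruntime code):

#include <stdint.h>

struct entity {
	uint64_t vruntime;
	int64_t  lag;	/* service owed to (+) or by (-) the entity */
};

/* dequeue (sleep): remember the distance to the queue average */
static void record_lag(uint64_t avg_vruntime, struct entity *se)
{
	se->lag = (int64_t)(avg_vruntime - se->vruntime);
}

/*
 * enqueue (wakeup): restore exactly that distance, instead of
 * granting a fixed sleeper bonus of up to sysctl_sched_latency.
 */
static void place(uint64_t avg_vruntime, struct entity *se)
{
	se->vruntime = avg_vruntime - se->lag;
}

Positive lag (the entity was behind the average when it went to sleep)
places it left of the average on wakeup; negative lag places it right.
Either way the entity resumes with the service deficit or surplus it
left with.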
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 include/linux/sched.h   |    1 
 kernel/sched/core.c     |    1 
 kernel/sched/fair.c     |   63 +++++++++++++++++++++++++++---------------------
 kernel/sched/features.h |    8 ++++++
 4 files changed, 46 insertions(+), 27 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -555,6 +555,7 @@ struct sched_entity {
u64 sum_exec_runtime;
u64 vruntime;
u64 prev_sum_exec_runtime;
+ s64 lag;
u64 nr_migrations;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4436,6 +4436,7 @@ static void __sched_fork(unsigned long c
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
+ p->se.lag = 0;
INIT_LIST_HEAD(&p->se.group_node);
set_latency_offset(p);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4749,39 +4749,45 @@ static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
u64 vruntime = avg_vruntime(cfs_rq);
- u64 sleep_time;
- /* sleeps up to a single latency don't count. */
- if (!initial) {
- unsigned long thresh;
-
- if (se_is_idle(se))
- thresh = sysctl_sched_min_granularity;
- else
- thresh = sysctl_sched_latency;
+ if (sched_feat(FAIR_SLEEPERS)) {
+ u64 sleep_time;
+
+ /* sleeps up to a single latency don't count. */
+ if (!initial) {
+ unsigned long thresh;
+
+ if (se_is_idle(se))
+ thresh = sysctl_sched_min_granularity;
+ else
+ thresh = sysctl_sched_latency;
+
+ /*
+ * Halve their sleep time's effect, to allow
+ * for a gentler effect of sleepers:
+ */
+ if (sched_feat(GENTLE_FAIR_SLEEPERS))
+ thresh >>= 1;
+
+ vruntime -= thresh;
+ }
/*
- * Halve their sleep time's effect, to allow
- * for a gentler effect of sleepers:
+ * Pull vruntime of the entity being placed to the base level of
+ * cfs_rq, to prevent boosting it if placed backwards. If the entity
+ * slept for a long time, don't even try to compare its vruntime with
+ * the base as it may be too far off and the comparison may get
+ * inversed due to s64 overflow.
*/
- if (sched_feat(GENTLE_FAIR_SLEEPERS))
- thresh >>= 1;
-
- vruntime -= thresh;
+ sleep_time = rq_clock_task(rq_of(cfs_rq)) - se->exec_start;
+ if ((s64)sleep_time < 60LL * NSEC_PER_SEC)
+ vruntime = max_vruntime(se->vruntime, vruntime);
}
- /*
- * Pull vruntime of the entity being placed to the base level of
- * cfs_rq, to prevent boosting it if placed backwards. If the entity
- * slept for a long time, don't even try to compare its vruntime with
- * the base as it may be too far off and the comparison may get
- * inversed due to s64 overflow.
- */
- sleep_time = rq_clock_task(rq_of(cfs_rq)) - se->exec_start;
- if ((s64)sleep_time > 60LL * NSEC_PER_SEC)
- se->vruntime = vruntime;
- else
- se->vruntime = max_vruntime(se->vruntime, vruntime);
+ if (sched_feat(PRESERVE_LAG))
+ vruntime -= se->lag;
+
+ se->vruntime = vruntime;
}
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
@@ -4949,6 +4955,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
clear_buddies(cfs_rq, se);
+ if (sched_feat(PRESERVE_LAG) && (flags & DEQUEUE_SLEEP))
+ se->lag = avg_vruntime(cfs_rq) - se->vruntime;
+
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
se->on_rq = 0;
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -1,12 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
/*
* Only give sleepers 50% of their service deficit. This allows
* them to run sooner, but does not allow tons of sleepers to
* rip the spread apart.
*/
+SCHED_FEAT(FAIR_SLEEPERS, false)
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
/*
+ * Using the avg_vruntime, do the right thing and preserve lag
+ * across sleep+wake cycles.
+ */
+SCHED_FEAT(PRESERVE_LAG, true)
+
+/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it's likely going to consume data we
 * touched, which increases cache locality.
 */
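
Taken together, with the default feature settings above (FAIR_SLEEPERS
off, PRESERVE_LAG on), the sleep+wake path reduces to the following
pair of statements (condensed from the two fair.c hunks above, not a
literal excerpt):

	/* dequeue_entity(), on DEQUEUE_SLEEP: */
	se->lag = avg_vruntime(cfs_rq) - se->vruntime;

	/* place_entity(), on the subsequent wakeup: */
	se->vruntime = avg_vruntime(cfs_rq) - se->lag;

As with any SCHED_FEAT, both knobs can be flipped at runtime (given
CONFIG_SCHED_DEBUG) via /sys/kernel/debug/sched/features, e.g. to
re-enable FAIR_SLEEPERS or select NO_PRESERVE_LAG when comparing the
two placement policies.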