Message-ID: <20241116160126.29454-3-changwoo@igalia.com>
Date: Sun, 17 Nov 2024 01:01:23 +0900
From: Changwoo Min <multics69@...il.com>
To: tj@...nel.org,
void@...ifault.com
Cc: mingo@...hat.com,
peterz@...radead.org,
changwoo@...lia.com,
kernel-dev@...lia.com,
linux-kernel@...r.kernel.org
Subject: [PATCH 2/5] sched_ext: Manage the validity of scx_rq_clock

An rq clock becomes valid when it is updated using update_rq_clock()
and invalidated when the rq is unlocked using rq_unpin_lock(). Also,
after long-running operations in a BPF scheduler, namely ops.running()
and ops.update_idle(), the sched_ext core invalidates the rq clock.

Signed-off-by: Changwoo Min <changwoo@...lia.com>
---
 kernel/sched/core.c  | 6 +++++-
 kernel/sched/ext.c   | 3 +++
 kernel/sched/sched.h | 2 +-
 3 files changed, 9 insertions(+), 2 deletions(-)
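
Note for readers (kept below the "---" so it stays out of the commit
message): the scx_rq_clock_update() and scx_rq_clock_stale() helpers used
in the hunks below are introduced by patch 1/5 of this series, which is
not shown here. The following is only a minimal sketch of the assumed
shape of those helpers; the rq->scx.clock field and the SCX_RQ_CLK_VALID
flag are illustrative names and may differ from the actual patch.

/*
 * Illustrative sketch, not the patch 1/5 implementation: rq->scx.clock
 * is assumed to cache the clock captured under the rq lock, and
 * SCX_RQ_CLK_VALID in rq->scx.flags is assumed to mark it as usable.
 */
static inline void scx_rq_clock_update(struct rq *rq, u64 clock)
{
        if (!scx_enabled())
                return;
        /* Publish the new clock value, then mark it valid. */
        WRITE_ONCE(rq->scx.clock, clock);
        smp_store_release(&rq->scx.flags, rq->scx.flags | SCX_RQ_CLK_VALID);
}

static inline void scx_rq_clock_stale(struct rq *rq)
{
        if (!scx_enabled())
                return;
        /* Clear the valid bit; readers must fall back to a fresh read. */
        WRITE_ONCE(rq->scx.flags, rq->scx.flags & ~SCX_RQ_CLK_VALID);
}

With helpers of that shape, the hunks below capture the clock in
update_rq_clock() while the rq lock is held, and mark it stale when the
rq is unpinned or when a BPF scheduler callback may have run for a long
time.
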
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a910a5b4c274..d0eb58b6a2da 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -784,6 +784,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 void update_rq_clock(struct rq *rq)
 {
         s64 delta;
+        u64 clock;
 
         lockdep_assert_rq_held(rq);
 
@@ -795,11 +796,14 @@ void update_rq_clock(struct rq *rq)
                 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
         rq->clock_update_flags |= RQCF_UPDATED;
 #endif
+        clock = sched_clock_cpu(cpu_of(rq));
+        scx_rq_clock_update(rq, clock);
 
-        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+        delta = clock - rq->clock;
         if (delta < 0)
                 return;
         rq->clock += delta;
+
         update_rq_clock_task(rq, delta);
 }
 
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 9f9bc2930658..b8ad776ef516 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -2928,6 +2928,8 @@ static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
         if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
                 SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
 
+        scx_rq_clock_stale(rq);
+
         clr_task_runnable(p, true);
 
         /*
@@ -3590,6 +3592,7 @@ void __scx_update_idle(struct rq *rq, bool idle)
 {
         int cpu = cpu_of(rq);
 
+        scx_rq_clock_stale(rq);
         if (SCX_HAS_OP(update_idle) && !scx_rq_bypassing(rq)) {
                 SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
                 if (!static_branch_unlikely(&scx_builtin_idle_enabled))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 61efff790e24..03854ac9914b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1758,7 +1758,7 @@ static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
         if (rq->clock_update_flags > RQCF_ACT_SKIP)
                 rf->clock_update_flags = RQCF_UPDATED;
 #endif
-
+        scx_rq_clock_stale(rq);
         lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
 }
 
--
2.47.0