[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240711011434.1421572-5-tj@kernel.org>
Date: Wed, 10 Jul 2024 15:14:01 -1000
From: Tejun Heo <tj@...nel.org>
To: void@...ifault.com
Cc: linux-kernel@...r.kernel.org,
kernel-team@...a.com,
schatzberg.dan@...il.com,
mingo@...hat.com,
peterz@...radead.org,
changwoo@...lia.com,
righi.andrea@...il.com,
Tejun Heo <tj@...nel.org>
Subject: [PATCH 4/6] sched_ext: s/SCX_RQ_BALANCING/SCX_RQ_IN_BALANCE/ and add SCX_RQ_IN_WAKEUP
SCX_RQ_BALANCING is used to mark that the rq is currently in balance().
Rename it to SCX_RQ_IN_BALANCE and add SCX_RQ_IN_WAKEUP which marks whether
a task is currently being enqueued on the rq for a wakeup. This will be used
to implement direct dispatching to the local DSQ of another CPU.
Signed-off-by: Tejun Heo <tj@...nel.org>
Acked-by: David Vernet <void@...ifault.com>
---
kernel/sched/ext.c | 13 +++++++++----
kernel/sched/sched.h | 6 ++++--
2 files changed, 13 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index d4f801cd2548..57d6ea65f857 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1827,6 +1827,9 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags
{
int sticky_cpu = p->scx.sticky_cpu;
+ if (enq_flags & ENQUEUE_WAKEUP)
+ rq->scx.flags |= SCX_RQ_IN_WAKEUP;
+
enq_flags |= rq->scx.extra_enq_flags;
if (sticky_cpu >= 0)
@@ -1843,7 +1846,7 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags
if (p->scx.flags & SCX_TASK_QUEUED) {
WARN_ON_ONCE(!task_runnable(p));
- return;
+ goto out;
}
set_task_runnable(rq, p);
@@ -1858,6 +1861,8 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags
touch_core_sched(rq, p);
do_enqueue_task(rq, p, enq_flags, sticky_cpu);
+out:
+ rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
}
static void ops_dequeue(struct task_struct *p, u64 deq_flags)
@@ -2420,7 +2425,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev, bool local)
bool has_tasks = false;
lockdep_assert_rq_held(rq);
- rq->scx.flags |= SCX_RQ_BALANCING;
+ rq->scx.flags |= SCX_RQ_IN_BALANCE;
if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
unlikely(rq->scx.cpu_released)) {
@@ -2514,7 +2519,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev, bool local)
has_tasks:
has_tasks = true;
out:
- rq->scx.flags &= ~SCX_RQ_BALANCING;
+ rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
return has_tasks;
}
@@ -5063,7 +5068,7 @@ static bool can_skip_idle_kick(struct rq *rq)
* The race window is small and we don't and can't guarantee that @rq is
* only kicked while idle anyway. Skip only when sure.
*/
- return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_BALANCING);
+ return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
}
static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 86314a17f1c7..8a0e8052f6b0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -737,8 +737,10 @@ enum scx_rq_flags {
* only while the BPF scheduler considers the CPU to be online.
*/
SCX_RQ_ONLINE = 1 << 0,
- SCX_RQ_BALANCING = 1 << 1,
- SCX_RQ_CAN_STOP_TICK = 1 << 2,
+ SCX_RQ_CAN_STOP_TICK = 1 << 1,
+
+ SCX_RQ_IN_WAKEUP = 1 << 16,
+ SCX_RQ_IN_BALANCE = 1 << 17,
};
struct scx_rq {
--
2.45.2
Powered by blists - more mailing lists