Message-ID: <20240901164417.779239-5-tj@kernel.org>
Date: Sun, 1 Sep 2024 06:43:41 -1000
From: Tejun Heo <tj@...nel.org>
To: void@...ifault.com
Cc: kernel-team@...a.com,
linux-kernel@...r.kernel.org,
Tejun Heo <tj@...nel.org>
Subject: [PATCH 04/12] sched_ext: Fix process_ddsp_deferred_locals() by unifying DTL_INVALID handling

With the preceding update, the only return value which makes a meaningful
difference is DTL_INVALID, for which one caller, finish_dispatch(), falls
back to the global DSQ while the other, process_ddsp_deferred_locals(),
doesn't do anything.

Both callers should always fall back to the global DSQ. Move the global DSQ
fallback into dispatch_to_local_dsq() and remove the return value.
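
For illustration only, a minimal standalone sketch (stubbed types and
made-up helper names like enqueue_global(); not the kernel code) of the
control-flow change, where the DTL_INVALID fallback moves out of the
callers and into the callee so that every caller behaves the same:

	#include <stdbool.h>
	#include <stdio.h>

	struct task { const char *name; };

	static void enqueue_global(struct task *p)
	{
		printf("%s -> global DSQ\n", p->name);
	}

	/* Before: a tri-state result left DTL_INVALID handling to callers. */
	enum dtl_ret { DTL_DISPATCHED, DTL_LOST, DTL_INVALID };

	static enum dtl_ret old_dispatch_to_local(struct task *p, bool valid)
	{
		if (!valid)
			return DTL_INVALID;	/* caller must remember to fall back */
		return DTL_DISPATCHED;
	}

	/* After: the fallback lives in the callee; nothing to mishandle. */
	static void new_dispatch_to_local(struct task *p, bool valid)
	{
		if (!valid) {
			enqueue_global(p);	/* unified global DSQ fallback */
			return;
		}
		printf("%s -> local DSQ\n", p->name);
	}

	int main(void)
	{
		struct task t = { "t" };

		/* finish_dispatch() handled DTL_INVALID; the deferred-locals
		 * path ignored it, which is the bug being fixed. */
		if (old_dispatch_to_local(&t, false) == DTL_INVALID)
			enqueue_global(&t);

		new_dispatch_to_local(&t, false);	/* falls back internally */
		return 0;
	}
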
v2: Patch title and description updated to reflect the behavior fix for
    process_ddsp_deferred_locals().

Signed-off-by: Tejun Heo <tj@...nel.org>
Acked-by: David Vernet <void@...ifault.com>
---
 kernel/sched/ext.c | 41 ++++++++++-------------------------------
 1 file changed, 10 insertions(+), 31 deletions(-)

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 1d35298ee561..ec61ab676517 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -2299,12 +2299,6 @@ static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
 	return false;
 }
 
-enum dispatch_to_local_dsq_ret {
-	DTL_DISPATCHED,		/* successfully dispatched */
-	DTL_LOST,		/* lost race to dequeue */
-	DTL_INVALID,		/* invalid local dsq_id */
-};
-
 /**
  * dispatch_to_local_dsq - Dispatch a task to a local dsq
  * @rq: current rq which is locked
@@ -2319,9 +2313,8 @@ enum dispatch_to_local_dsq_ret {
  * The caller must have exclusive ownership of @p (e.g. through
  * %SCX_OPSS_DISPATCHING).
  */
-static enum dispatch_to_local_dsq_ret
-dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
-		      struct task_struct *p, u64 enq_flags)
+static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+				  struct task_struct *p, u64 enq_flags)
 {
 	struct rq *src_rq = task_rq(p);
 	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
@@ -2334,13 +2327,11 @@ dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
 	 */
 	if (rq == src_rq && rq == dst_rq) {
 		dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
-		return DTL_DISPATCHED;
+		return;
 	}
 
 #ifdef CONFIG_SMP
 	if (likely(task_can_run_on_remote_rq(p, dst_rq, true))) {
-		bool dsp;
-
 		/*
 		 * @p is on a possibly remote @src_rq which we need to lock to
 		 * move the task. If dequeue is in progress, it'd be locking
@@ -2365,10 +2356,8 @@ dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
 		}
 
 		/* task_rq couldn't have changed if we're still the holding cpu */
-		dsp = p->scx.holding_cpu == raw_smp_processor_id() &&
-		      !WARN_ON_ONCE(src_rq != task_rq(p));
-
-		if (likely(dsp)) {
+		if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
+		    !WARN_ON_ONCE(src_rq != task_rq(p))) {
 			/*
 			 * If @p is staying on the same rq, there's no need to
 			 * go through the full deactivate/activate cycle.
@@ -2396,11 +2385,11 @@ dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
 			raw_spin_rq_lock(rq);
 		}
 
-		return dsp ? DTL_DISPATCHED : DTL_LOST;
+		return;
 	}
 #endif	/* CONFIG_SMP */
 
-	return DTL_INVALID;
+	dispatch_enqueue(&scx_dsq_global, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
 }
 
 /**
@@ -2477,20 +2466,10 @@ static void finish_dispatch(struct rq *rq, struct task_struct *p,
 
 	dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);
 
-	if (dsq->id == SCX_DSQ_LOCAL) {
-		switch (dispatch_to_local_dsq(rq, dsq, p, enq_flags)) {
-		case DTL_DISPATCHED:
-			break;
-		case DTL_LOST:
-			break;
-		case DTL_INVALID:
-			dispatch_enqueue(&scx_dsq_global, p,
-					 enq_flags | SCX_ENQ_CLEAR_OPSS);
-			break;
-		}
-	} else {
+	if (dsq->id == SCX_DSQ_LOCAL)
+		dispatch_to_local_dsq(rq, dsq, p, enq_flags);
+	else
 		dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
-	}
 }
 
 static void flush_dispatch_buf(struct rq *rq)
--
2.46.0