Message-ID: <20260121231140.832332-17-tj@kernel.org>
Date: Wed, 21 Jan 2026 13:11:22 -1000
From: Tejun Heo <tj@...nel.org>
To: linux-kernel@...r.kernel.org,
sched-ext@...ts.linux.dev
Cc: void@...ifault.com,
andrea.righi@...ux.dev,
changwoo@...lia.com,
emil@...alapatis.com,
Tejun Heo <tj@...nel.org>
Subject: [PATCH 16/34] sched_ext: Move aborting flag to per-scheduler field

The abort state is tracked in the global scx_aborting flag, which is used to
break out of potential live-lock scenarios when an error occurs. With
hierarchical scheduling, each scheduler instance must track its own abort
state independently so that an aborting scheduler doesn't interfere with
others.

Move the aborting flag into struct scx_sched and update all access sites. The
early initialization check in scx_root_enable() that warned about residual
aborting state is no longer needed, as each scheduler instance now starts with
a clean state.
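
As a minimal illustration of the pattern (not kernel code; names are made up
and C11 atomics stand in for the kernel's READ_ONCE()/WRITE_ONCE()), each
scheduler instance carries its own flag, so marking one instance as aborting
leaves its siblings untouched:

	/* Userspace sketch of a per-instance abort flag. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct sched_instance {
		const char *name;
		atomic_bool aborting;	/* previously a single global flag */
	};

	/* Mark only this instance as aborting. */
	static void sched_abort(struct sched_instance *si)
	{
		atomic_store_explicit(&si->aborting, true, memory_order_relaxed);
	}

	/* Dispatch path bails out only if *this* instance is aborting. */
	static bool sched_should_bail(struct sched_instance *si)
	{
		return atomic_load_explicit(&si->aborting, memory_order_relaxed);
	}

	int main(void)
	{
		struct sched_instance root  = { .name = "root" };
		struct sched_instance child = { .name = "child" };

		sched_abort(&child);

		printf("%s bail: %d\n", root.name, sched_should_bail(&root));   /* 0 */
		printf("%s bail: %d\n", child.name, sched_should_bail(&child)); /* 1 */
		return 0;
	}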
Signed-off-by: Tejun Heo <tj@...nel.org>
---
kernel/sched/ext.c | 10 +++-------
kernel/sched/ext_internal.h | 1 +
2 files changed, 4 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index aad534a8f704..e71fce857c6b 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -44,7 +44,6 @@ static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED);
static int scx_bypass_depth;
static cpumask_var_t scx_bypass_lb_donee_cpumask;
static cpumask_var_t scx_bypass_lb_resched_cpumask;
-static bool scx_aborting;
static bool scx_init_task_enabled;
static bool scx_switching_all;
DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
@@ -2057,7 +2056,7 @@ static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq,
* the system into the bypass mode. This can easily live-lock the
* machine. If aborting, exit from all non-bypass DSQs.
*/
- if (unlikely(READ_ONCE(scx_aborting)) && dsq->id != SCX_DSQ_BYPASS)
+ if (unlikely(READ_ONCE(sch->aborting)) && dsq->id != SCX_DSQ_BYPASS)
break;
if (rq == task_rq) {
@@ -4480,7 +4479,6 @@ static void scx_root_disable(struct scx_sched *sch)
/* guarantee forward progress and wait for descendants to be disabled */
scx_bypass(true);
- WRITE_ONCE(scx_aborting, false);
drain_descendants(sch);
switch (scx_set_enable_state(SCX_DISABLING)) {
@@ -4632,7 +4630,7 @@ static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind)
* flag to break potential live-lock scenarios, ensuring we can
* successfully reach scx_bypass().
*/
- WRITE_ONCE(scx_aborting, true);
+ WRITE_ONCE(sch->aborting, true);
/*
* Propagate exits to descendants immediately. Each has a dedicated
@@ -5268,8 +5266,6 @@ static s32 scx_root_enable(struct sched_ext_ops *ops, struct bpf_link *link)
*/
WARN_ON_ONCE(scx_set_enable_state(SCX_ENABLING) != SCX_DISABLED);
WARN_ON_ONCE(scx_root);
- if (WARN_ON_ONCE(READ_ONCE(scx_aborting)))
- WRITE_ONCE(scx_aborting, false);
atomic_long_set(&scx_nr_rejected, 0);
@@ -6502,7 +6498,7 @@ static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
* If the BPF scheduler keeps calling this function repeatedly, it can
* cause similar live-lock conditions as consume_dispatch_q().
*/
- if (unlikely(READ_ONCE(scx_aborting)))
+ if (unlikely(READ_ONCE(sch->aborting)))
return false;
if (unlikely(!scx_task_on_sched(sch, p))) {
diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h
index 8836bad027ea..16c1f0eb6c69 100644
--- a/kernel/sched/ext_internal.h
+++ b/kernel/sched/ext_internal.h
@@ -951,6 +951,7 @@ struct scx_sched {
struct scx_sched_pcpu __percpu *pcpu;
u64 slice_dfl;
+ bool aborting;
s32 level;
/*
--
2.52.0