Message-ID: <20260121231140.832332-16-tj@kernel.org>
Date: Wed, 21 Jan 2026 13:11:21 -1000
From: Tejun Heo <tj@...nel.org>
To: linux-kernel@...r.kernel.org,
sched-ext@...ts.linux.dev
Cc: void@...ifault.com,
andrea.righi@...ux.dev,
changwoo@...lia.com,
emil@...alapatis.com,
Tejun Heo <tj@...nel.org>
Subject: [PATCH 15/34] sched_ext: Move default slice to per-scheduler field
The default time slice was stored in the global scx_slice_dfl variable, which
was modified dynamically when entering and exiting bypass mode. With
hierarchical scheduling, each scheduler instance needs its own default slice
configuration so that bypass operations on one scheduler don't affect others.
Move slice_dfl into struct scx_sched and update all access sites. The bypass
logic now modifies the root scheduler's slice_dfl. At task initialization in
init_scx_entity(), use the SCX_SLICE_DFL constant directly since the task may
not yet be associated with a specific scheduler.
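For illustration, the following is a minimal userspace sketch of the resulting
behavior. struct sched_inst and refill_slice() are hypothetical stand-ins for
struct scx_sched and refill_task_slice_dfl(), and plain volatile accesses
stand in for READ_ONCE()/WRITE_ONCE():

#include <stdint.h>
#include <stdio.h>

#define SLICE_DFL	20000000ULL	/* 20ms in ns, like SCX_SLICE_DFL */
#define SLICE_BYPASS	5000000ULL	/* shortened slice used while bypassing */

struct sched_inst {
	uint64_t slice_dfl;		/* was a single global before this patch */
};

/* hand out the instance's own default slice */
static uint64_t refill_slice(struct sched_inst *inst)
{
	return *(volatile uint64_t *)&inst->slice_dfl;
}

int main(void)
{
	struct sched_inst root = { .slice_dfl = SLICE_DFL };
	struct sched_inst child = { .slice_dfl = SLICE_DFL };

	/* bypass shortens the root's slice without perturbing the child */
	*(volatile uint64_t *)&root.slice_dfl = SLICE_BYPASS;

	printf("root %llu child %llu\n",
	       (unsigned long long)refill_slice(&root),
	       (unsigned long long)refill_slice(&child));
	return 0;
}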
Signed-off-by: Tejun Heo <tj@...nel.org>
---
kernel/sched/ext.c | 12 ++++++------
kernel/sched/ext_internal.h | 1 +
2 files changed, 7 insertions(+), 6 deletions(-)
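A note for reviewers: the scx_bypass() hunks below keep the existing
depth-counted enter/exit pattern and only retarget the write to the root
scheduler's field. A standalone sketch of that pattern, using illustrative
names rather than the kernel's:

#include <assert.h>
#include <stdint.h>

#define SLICE_DFL	20000000ULL
#define SLICE_BYPASS	5000000ULL

struct sched_inst {
	uint64_t slice_dfl;
};

static int bypass_depth;	/* nesting count, like scx_bypass_depth */

/* only the outermost enter/exit toggles the root's default slice */
static void bypass(struct sched_inst *root, int enter)
{
	if (enter) {
		bypass_depth++;
		assert(bypass_depth > 0);
		if (bypass_depth != 1)
			return;			/* nested enter: no change */
		root->slice_dfl = SLICE_BYPASS;
	} else {
		bypass_depth--;
		assert(bypass_depth >= 0);
		if (bypass_depth != 0)
			return;			/* still bypassing */
		root->slice_dfl = SLICE_DFL;
	}
}

int main(void)
{
	struct sched_inst root = { .slice_dfl = SLICE_DFL };

	bypass(&root, 1);	/* 0 -> 1: shorten */
	bypass(&root, 1);	/* nested: unchanged */
	bypass(&root, 0);	/* 2 -> 1: unchanged */
	assert(root.slice_dfl == SLICE_BYPASS);
	bypass(&root, 0);	/* last exit: restore */
	assert(root.slice_dfl == SLICE_DFL);
	return 0;
}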
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index ad4f8fbc0f24..aad534a8f704 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -164,7 +164,6 @@ static struct kset *scx_kset;
  * There usually is no reason to modify these as normal scheduler operation
  * shouldn't be affected by them. The knobs are primarily for debugging.
  */
-static u64 scx_slice_dfl = SCX_SLICE_DFL;
 static unsigned int scx_slice_bypass_us = SCX_SLICE_BYPASS / NSEC_PER_USEC;
 static unsigned int scx_bypass_lb_intv_us = SCX_BYPASS_LB_DFL_INTV_US;
 
@@ -1127,7 +1126,7 @@ static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
 
 static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p)
 {
-	p->scx.slice = READ_ONCE(scx_slice_dfl);
+	p->scx.slice = READ_ONCE(sch->slice_dfl);
 	__scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
 }
 
@@ -3151,7 +3150,7 @@ void init_scx_entity(struct sched_ext_entity *scx)
 	INIT_LIST_HEAD(&scx->runnable_node);
 	scx->runnable_at = jiffies;
 	scx->ddsp_dsq_id = SCX_DSQ_INVALID;
-	scx->slice = READ_ONCE(scx_slice_dfl);
+	scx->slice = SCX_SLICE_DFL;
 }
 
 void scx_pre_fork(struct task_struct *p)
@@ -4262,7 +4261,7 @@ static void scx_bypass(bool bypass)
 		WARN_ON_ONCE(scx_bypass_depth <= 0);
 		if (scx_bypass_depth != 1)
 			goto unlock;
-		WRITE_ONCE(scx_slice_dfl, scx_slice_bypass_us * NSEC_PER_USEC);
+		WRITE_ONCE(sch->slice_dfl, scx_slice_bypass_us * NSEC_PER_USEC);
 		bypass_timestamp = ktime_get_ns();
 		if (sch)
 			scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1);
@@ -4278,7 +4277,7 @@ static void scx_bypass(bool bypass)
 		WARN_ON_ONCE(scx_bypass_depth < 0);
 		if (scx_bypass_depth != 0)
 			goto unlock;
-		WRITE_ONCE(scx_slice_dfl, SCX_SLICE_DFL);
+		WRITE_ONCE(sch->slice_dfl, SCX_SLICE_DFL);
 		if (sch)
 			scx_add_event(sch, SCX_EV_BYPASS_DURATION,
 				      ktime_get_ns() - bypass_timestamp);
@@ -5111,6 +5110,7 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops,
 	sch->ancestors[level] = sch;
 	sch->level = level;
+	sch->slice_dfl = SCX_SLICE_DFL;
 
 	atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
 	init_irq_work(&sch->error_irq_work, scx_error_irq_workfn);
 	kthread_init_work(&sch->disable_work, scx_disable_workfn);
@@ -5447,7 +5447,7 @@ static s32 scx_root_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 			queue_flags |= DEQUEUE_CLASS;
 
 		scoped_guard (sched_change, p, queue_flags) {
-			p->scx.slice = READ_ONCE(scx_slice_dfl);
+			p->scx.slice = READ_ONCE(sch->slice_dfl);
 			p->sched_class = new_class;
 		}
 	}
diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h
index 335e90ca132e..8836bad027ea 100644
--- a/kernel/sched/ext_internal.h
+++ b/kernel/sched/ext_internal.h
@@ -950,6 +950,7 @@ struct scx_sched {
 	struct scx_dispatch_q **global_dsqs;
 	struct scx_sched_pcpu __percpu *pcpu;
 
+	u64 slice_dfl;
 	s32 level;
 
 	/*
--
2.52.0