Message-ID: <20250920005931.2753828-37-tj@kernel.org>
Date: Fri, 19 Sep 2025 14:58:59 -1000
From: Tejun Heo <tj@...nel.org>
To: void@...ifault.com,
arighi@...dia.com,
multics69@...il.com
Cc: linux-kernel@...r.kernel.org,
sched-ext@...ts.linux.dev,
memxor@...il.com,
bpf@...r.kernel.org,
Tejun Heo <tj@...nel.org>
Subject: [PATCH 36/46] sched_ext: Move scx_dsp_ctx and scx_dsp_max_batch into scx_sched
scx_dsp_ctx and scx_dsp_max_batch are global variables used in the dispatch
path. In preparation for multiple scheduler support, move the former into
scx_sched_pcpu and the latter into scx_sched. No user-visible behavior
changes intended.
Signed-off-by: Tejun Heo <tj@...nel.org>
---
kernel/sched/ext.c | 56 ++++++++++---------------------------
kernel/sched/ext_internal.h | 18 ++++++++++++
2 files changed, 33 insertions(+), 41 deletions(-)
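For reviewers skimming the diff, here is the shape of the change in
isolation, a sketch assembled from the hunks below with error handling and
unrelated fields elided:

    /* per-scheduler, per-CPU state now ends in the dispatch context */
    struct scx_dsp_ctx {
            struct rq               *rq;
            u32                     cursor;
            u32                     nr_tasks;
            struct scx_dsp_buf_ent  buf[];  /* dsp_max_batch entries */
    };

    struct scx_sched_pcpu {
            u64                     flags;
            /* ... */
            struct scx_dsp_ctx      dsp_ctx; /* must stay last: buf[] is flexible */
    };

    /* struct_size_t() sizes the allocation so buf[] gets
     * dsp_max_batch entries on each CPU */
    sch->dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
    sch->pcpu = __alloc_percpu(struct_size_t(struct scx_sched_pcpu,
                                             dsp_ctx.buf, sch->dsp_max_batch),
                               __alignof__(struct scx_sched_pcpu));

    /* lookups change from the global to the per-scheduler copy */
    struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;

Since dsp_ctx carries a flexible array member, it has to be the final field
of scx_sched_pcpu, and the per-cpu allocation is sized for the whole
containing struct rather than for scx_dsp_ctx alone.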
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 75a4b05fced4..3fcf6cd7fa00 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -102,25 +102,6 @@ static const struct rhashtable_params dsq_hash_params = {
static LLIST_HEAD(dsqs_to_free);
-/* dispatch buf */
-struct scx_dsp_buf_ent {
- struct task_struct *task;
- unsigned long qseq;
- u64 dsq_id;
- u64 enq_flags;
-};
-
-static u32 scx_dsp_max_batch;
-
-struct scx_dsp_ctx {
- struct rq *rq;
- u32 cursor;
- u32 nr_tasks;
- struct scx_dsp_buf_ent buf[];
-};
-
-static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
-
/* string formatting from BPF */
struct scx_bstr_buf {
u64 data[MAX_BPRINTF_VARARGS];
@@ -2164,7 +2145,7 @@ static void finish_dispatch(struct scx_sched *sch, struct rq *rq,
static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
{
- struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+ struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
u32 u;
for (u = 0; u < dspc->cursor; u++) {
@@ -2181,7 +2162,7 @@ static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
static bool scx_dispatch_sched(struct scx_sched *sch, struct rq *rq,
struct task_struct *prev)
{
- struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+ struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
int nr_loops = SCX_DSP_MAX_LOOPS;
bool prev_on_sch = (prev->sched_class == &ext_sched_class) &&
sch == rcu_access_pointer(prev->scx.sched);
@@ -4356,10 +4337,6 @@ static void scx_root_disable(struct scx_sched *sch)
*/
kobject_del(&sch->kobj);
- free_percpu(scx_dsp_ctx);
- scx_dsp_ctx = NULL;
- scx_dsp_max_batch = 0;
-
mutex_unlock(&scx_enable_mutex);
WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
@@ -4785,7 +4762,10 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops,
sch->global_dsqs[node] = dsq;
}
- sch->pcpu = alloc_percpu(struct scx_sched_pcpu);
+ sch->dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
+ sch->pcpu = __alloc_percpu(struct_size_t(struct scx_sched_pcpu,
+ dsp_ctx.buf, sch->dsp_max_batch),
+ __alignof__(struct scx_sched_pcpu));
if (!sch->pcpu)
goto err_free_gdsqs;
@@ -4999,16 +4979,6 @@ static int scx_root_enable(struct sched_ext_ops *ops, struct bpf_link *link)
if (ret)
goto err_disable;
- WARN_ON_ONCE(scx_dsp_ctx);
- scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
- scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
- scx_dsp_max_batch),
- __alignof__(struct scx_dsp_ctx));
- if (!scx_dsp_ctx) {
- ret = -ENOMEM;
- goto err_disable;
- }
-
if (ops->timeout_ms)
timeout = msecs_to_jiffies(ops->timeout_ms);
else
@@ -5947,7 +5917,7 @@ static bool scx_dsq_insert_preamble(struct scx_sched *sch, struct task_struct *p
static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p,
u64 dsq_id, u64 enq_flags)
{
- struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+ struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
struct task_struct *ddsp_task;
ddsp_task = __this_cpu_read(direct_dispatch_task);
@@ -5956,7 +5926,7 @@ static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p,
return;
}
- if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
+ if (unlikely(dspc->cursor >= sch->dsp_max_batch)) {
scx_error(sch, "dispatch buffer overflow");
return;
}
@@ -6204,7 +6174,7 @@ __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(const struct bpf_prog_aux *aux__prog)
if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
return 0;
- return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
+ return sch->dsp_max_batch - __this_cpu_read(sch->pcpu->dsp_ctx.cursor);
}
/**
@@ -6216,8 +6186,8 @@ __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(const struct bpf_prog_aux *aux__prog)
*/
__bpf_kfunc void scx_bpf_dispatch_cancel(const struct bpf_prog_aux *aux__prog)
{
- struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
struct scx_sched *sch;
+ struct scx_dsp_ctx *dspc;
guard(rcu)();
@@ -6228,6 +6198,8 @@ __bpf_kfunc void scx_bpf_dispatch_cancel(const struct bpf_prog_aux *aux__prog)
if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
return;
+ dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
+
if (dspc->cursor > 0)
dspc->cursor--;
else
@@ -6252,9 +6224,9 @@ __bpf_kfunc void scx_bpf_dispatch_cancel(const struct bpf_prog_aux *aux__prog)
__bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id,
const struct bpf_prog_aux *aux__prog)
{
- struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
struct scx_dispatch_q *dsq;
struct scx_sched *sch;
+ struct scx_dsp_ctx *dspc;
guard(rcu)();
@@ -6265,6 +6237,8 @@ __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id,
if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
return false;
+ dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
+
flush_dispatch_buf(sch, dspc->rq);
dsq = find_user_dsq(sch, dsq_id);
diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h
index 083ca14f03e2..8dbdae910564 100644
--- a/kernel/sched/ext_internal.h
+++ b/kernel/sched/ext_internal.h
@@ -913,6 +913,21 @@ enum scx_sched_pcpu_flags {
SCX_SCHED_PCPU_BYPASSING = 1LLU << 0,
};
+/* dispatch buf */
+struct scx_dsp_buf_ent {
+ struct task_struct *task;
+ unsigned long qseq;
+ u64 dsq_id;
+ u64 enq_flags;
+};
+
+struct scx_dsp_ctx {
+ struct rq *rq;
+ u32 cursor;
+ u32 nr_tasks;
+ struct scx_dsp_buf_ent buf[];
+};
+
struct scx_sched_pcpu {
u64 flags; /* protected by rq lock */
@@ -922,6 +937,8 @@ struct scx_sched_pcpu {
* constructed when requested by scx_bpf_events().
*/
struct scx_event_stats event_stats;
+
+ struct scx_dsp_ctx dsp_ctx;
};
struct scx_sched {
@@ -941,6 +958,7 @@ struct scx_sched {
struct scx_sched_pcpu __percpu *pcpu;
s32 bypass_depth;
+ u32 dsp_max_batch;
s32 level;
bool warned_zero_slice:1;
bool warned_deprecated_rq:1;
--
2.51.0