Message-ID: <20251022093826.113508-1-arighi@nvidia.com>
Date: Wed, 22 Oct 2025 11:38:26 +0200
From: Andrea Righi <arighi@...dia.com>
To: Tejun Heo <tj@...nel.org>,
David Vernet <void@...ifault.com>,
Changwoo Min <changwoo@...lia.com>
Cc: Emil Tsalapatis <emil@...alapatis.com>,
sched-ext@...ts.linux.dev,
linux-kernel@...r.kernel.org
Subject: [PATCH sched_ext/for-6.19] sched_ext: Use ___v2 suffix for new kfuncs and fix scx build errors
Following commit 2dbbdeda77a61 ("sched_ext: Fix scx_bpf_dsq_insert()
backward binary compatibility"), consistently apply the ___v2 suffix to
the new scx_bpf_dsq_insert_vtime() and scx_bpf_select_cpu_and() kfuncs
as well.

Introduce __COMPAT_scx_bpf_select_cpu_and() and
__COMPAT_scx_bpf_dsq_insert_vtime() so that schedulers can transition
smoothly to the updated interfaces, and temporarily mirror the
definitions of struct scx_bpf_select_cpu_and_args and struct
scx_bpf_dsq_insert_vtime_args to prevent build failures on kernels
where these structs are not yet defined.

Both the compatibility helpers and the mirrored structs will be removed
in v6.23.

With these changes in place, all schedulers in the scx repository build
and run correctly with the updated headers.
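
For example, a scheduler can switch to the compat wrappers as in the
following minimal sketch (the ops callback names, the scheduler-defined
VTIME_DSQ id and the flag choices are illustrative, not part of this
patch):

  s32 BPF_STRUCT_OPS(sched_select_cpu, struct task_struct *p,
                     s32 prev_cpu, u64 wake_flags)
  {
          /* Pick an idle CPU from @p's allowed cpumask via the wrapper. */
          s32 cpu = __COMPAT_scx_bpf_select_cpu_and(p, prev_cpu, wake_flags,
                                                    p->cpus_ptr, 0);

          return cpu >= 0 ? cpu : prev_cpu;
  }

  void BPF_STRUCT_OPS(sched_enqueue, struct task_struct *p, u64 enq_flags)
  {
          /*
           * Queue @p, ordered by its vtime, on a scheduler-created DSQ
           * (VTIME_DSQ is illustrative) via the wrapper.
           */
          __COMPAT_scx_bpf_dsq_insert_vtime(p, VTIME_DSQ, SCX_SLICE_DFL,
                                            p->scx.dsq_vtime, enq_flags);
  }

The wrappers transparently fall back to the old kfuncs on kernels that
do not define the new _args structs.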
Fixes: c0d630ba347c7 ("sched_ext: Wrap kfunc args in struct to prepare for aux__prog")
Signed-off-by: Andrea Righi <arighi@...dia.com>
---
kernel/sched/ext.c | 10 ++--
kernel/sched/ext_idle.c | 10 ++--
tools/sched_ext/include/scx/common.bpf.h | 3 --
tools/sched_ext/include/scx/compat.bpf.h | 63 +++++++++++++++++-------
4 files changed, 56 insertions(+), 30 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index cb41365032ebc..8816d89426a6f 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -5490,7 +5490,7 @@ struct scx_bpf_dsq_insert_vtime_args {
};
/**
- * __scx_bpf_dsq_insert_vtime - Arg-wrapped vtime DSQ insertion
+ * scx_bpf_dsq_insert_vtime___v2 - Arg-wrapped vtime DSQ insertion
* @p: task_struct to insert
* @args: struct containing the rest of the arguments
* @args->dsq_id: DSQ to insert into
@@ -5520,8 +5520,8 @@ struct scx_bpf_dsq_insert_vtime_args {
* to check the return value.
*/
__bpf_kfunc bool
-__scx_bpf_dsq_insert_vtime(struct task_struct *p,
- struct scx_bpf_dsq_insert_vtime_args *args)
+scx_bpf_dsq_insert_vtime___v2(struct task_struct *p,
+ struct scx_bpf_dsq_insert_vtime_args *args)
{
struct scx_sched *sch;
@@ -5536,7 +5536,7 @@ __scx_bpf_dsq_insert_vtime(struct task_struct *p,
}
/*
- * COMPAT: Will be removed in v6.23.
+ * COMPAT: Will be removed in v6.23 along with the ___v2 suffix.
*/
__bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
u64 slice, u64 vtime, u64 enq_flags)
@@ -5557,7 +5557,7 @@ __bpf_kfunc_end_defs();
BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_dsq_insert___v2, KF_RCU)
-BTF_ID_FLAGS(func, __scx_bpf_dsq_insert_vtime, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime___v2, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index 3d9d404d5cd20..d4b6b5a300345 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -1003,7 +1003,7 @@ struct scx_bpf_select_cpu_and_args {
};
/**
- * __scx_bpf_select_cpu_and - Arg-wrapped CPU selection with cpumask
+ * scx_bpf_select_cpu_and___v2 - Arg-wrapped CPU selection with cpumask
* @p: task_struct to select a CPU for
* @cpus_allowed: cpumask of allowed CPUs
* @args: struct containing the rest of the arguments
@@ -1027,8 +1027,8 @@ struct scx_bpf_select_cpu_and_args {
* a negative value if no idle CPU is available.
*/
__bpf_kfunc s32
-__scx_bpf_select_cpu_and(struct task_struct *p, const struct cpumask *cpus_allowed,
- struct scx_bpf_select_cpu_and_args *args)
+scx_bpf_select_cpu_and___v2(struct task_struct *p, const struct cpumask *cpus_allowed,
+ struct scx_bpf_select_cpu_and_args *args)
{
struct scx_sched *sch;
@@ -1043,7 +1043,7 @@ __scx_bpf_select_cpu_and(struct task_struct *p, const struct cpumask *cpus_allow
}
/*
- * COMPAT: Will be removed in v6.22.
+ * COMPAT: Will be removed in v6.22 along with the ___v2 suffix.
*/
__bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
const struct cpumask *cpus_allowed, u64 flags)
@@ -1413,7 +1413,7 @@ BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu_node, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu_node, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
-BTF_ID_FLAGS(func, __scx_bpf_select_cpu_and, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_select_cpu_and___v2, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_select_cpu_and, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_idle)
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index e65b1eb668ea5..64e5411d04c04 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -60,9 +60,6 @@ static inline void ___vmlinux_h_sanity_check___(void)
s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym;
-s32 __scx_bpf_select_cpu_and(struct task_struct *p, const struct cpumask *cpus_allowed,
- struct scx_bpf_select_cpu_and_args *args) __ksym __weak;
-bool __scx_bpf_dsq_insert_vtime(struct task_struct *p, struct scx_bpf_dsq_insert_vtime_args *args) __ksym __weak;
u32 scx_bpf_dispatch_nr_slots(void) __ksym;
void scx_bpf_dispatch_cancel(void) __ksym;
bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym __weak;
diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h
index a023b71991a6a..963ff3263c20b 100644
--- a/tools/sched_ext/include/scx/compat.bpf.h
+++ b/tools/sched_ext/include/scx/compat.bpf.h
@@ -161,6 +161,25 @@ static inline struct task_struct *__COMPAT_scx_bpf_cpu_curr(int cpu)
return rq ? rq->curr : NULL;
}
+/*
+ * v6.19: Mirror the following _args structs, to prevent build errors in
+ * kernels that don't have these structs defined yet.
+ *
+ * The kernel will carry these mirrored structs until v6.23 (see below).
+ */
+struct scx_bpf_select_cpu_and_args {
+ s32 prev_cpu;
+ u64 wake_flags;
+ u64 flags;
+};
+
+struct scx_bpf_dsq_insert_vtime_args {
+ u64 dsq_id;
+ u64 slice;
+ u64 vtime;
+ u64 enq_flags;
+};
+
/*
* v6.19: To work around BPF maximum parameter limit, the following kfuncs are
* replaced with variants that pack scalar arguments in a struct. Wrappers are
@@ -170,12 +189,20 @@ static inline struct task_struct *__COMPAT_scx_bpf_cpu_curr(int cpu)
* compatibility. After v6.23 release, remove the compat handling and move the
* wrappers to common.bpf.h.
*/
-s32 scx_bpf_select_cpu_and___compat(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
- const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
-void scx_bpf_dsq_insert_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
+s32 scx_bpf_select_cpu_and___v2(struct task_struct *p, const struct cpumask *cpus_allowed,
+ struct scx_bpf_select_cpu_and_args *args) __ksym __weak;
+
+s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+ const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
+
+bool scx_bpf_dsq_insert_vtime___v2(struct task_struct *p,
+ struct scx_bpf_dsq_insert_vtime_args *args) __ksym __weak;
+
+void scx_bpf_dsq_insert_vtime(struct task_struct *p,
+ u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
/**
- * scx_bpf_select_cpu_and - Pick an idle CPU usable by task @p
+ * __COMPAT_scx_bpf_select_cpu_and - Pick an idle CPU usable by task @p
* @p: task_struct to select a CPU for
* @prev_cpu: CPU @p was on previously
* @wake_flags: %SCX_WAKE_* flags
@@ -183,11 +210,12 @@ void scx_bpf_dsq_insert_vtime___compat(struct task_struct *p, u64 dsq_id, u64 sl
* @flags: %SCX_PICK_IDLE* flags
*
* Inline wrapper that packs scalar arguments into a struct and calls
- * __scx_bpf_select_cpu_and(). See __scx_bpf_select_cpu_and() for details.
+ * scx_bpf_select_cpu_and___v2(). See scx_bpf_select_cpu_and___v2() for
+ * details.
*/
static inline s32
-scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
- const struct cpumask *cpus_allowed, u64 flags)
+__COMPAT_scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+ const struct cpumask *cpus_allowed, u64 flags)
{
if (bpf_core_type_exists(struct scx_bpf_select_cpu_and_args)) {
struct scx_bpf_select_cpu_and_args args = {
@@ -196,15 +224,16 @@ scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
.flags = flags,
};
- return __scx_bpf_select_cpu_and(p, cpus_allowed, &args);
+ return scx_bpf_select_cpu_and___v2(p, cpus_allowed, &args);
} else {
- return scx_bpf_select_cpu_and___compat(p, prev_cpu, wake_flags,
- cpus_allowed, flags);
+ return scx_bpf_select_cpu_and(p, prev_cpu, wake_flags,
+ cpus_allowed, flags);
}
}
/**
- * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
+ * __COMPAT_scx_bpf_dsq_insert_vtime - Insert a task into the vtime
+ * priority queue of a DSQ
* @p: task_struct to insert
* @dsq_id: DSQ to insert into
* @slice: duration @p can run for in nsecs, 0 to keep the current value
@@ -212,11 +241,12 @@ scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
* @enq_flags: SCX_ENQ_*
*
* Inline wrapper that packs scalar arguments into a struct and calls
- * __scx_bpf_dsq_insert_vtime(). See __scx_bpf_dsq_insert_vtime() for details.
+ * scx_bpf_dsq_insert_vtime___v2(). See scx_bpf_dsq_insert_vtime___v2() for
+ * details.
*/
static inline bool
-scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime,
- u64 enq_flags)
+__COMPAT_scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime,
+ u64 enq_flags)
{
if (bpf_core_type_exists(struct scx_bpf_dsq_insert_vtime_args)) {
struct scx_bpf_dsq_insert_vtime_args args = {
@@ -226,10 +256,9 @@ scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime
.enq_flags = enq_flags,
};
- return __scx_bpf_dsq_insert_vtime(p, &args);
+ return scx_bpf_dsq_insert_vtime___v2(p, &args);
} else {
- scx_bpf_dsq_insert_vtime___compat(p, dsq_id, slice, vtime,
- enq_flags);
+ scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags);
return true;
}
}
--
2.51.1