Message-ID: <ZvSHZL3mBlYm2H_A@slm.duckdns.org>
Date: Wed, 25 Sep 2024 11:57:56 -1000
From: Tejun Heo <tj@...nel.org>
To: David Vernet <void@...ifault.com>
Cc: linux-kernel@...r.kernel.org, kernel-team@...a.com, sched-ext@...a.com
Subject: [sched_ext/for-6.12-fixes] sched_ext: Add __COMPAT helpers for
features added during v6.12 devel cycle
cgroup support and scx_bpf_dispatch[_vtime]_from_dsq() were added after
8bb30798fd6e ("sched_ext: Fixes incorrect type in bpf_scx_init()"), which is
the earliest commit currently targeted by BPF schedulers. Add compat helpers
for them and apply them in the example schedulers.

These helpers will be dropped after a few kernel releases. The exact backward
compatibility window hasn't been decided yet.

Signed-off-by: Tejun Heo <tj@...nel.org>
---
Applying to sched_ext/for-6.12-fixes.
Thanks.
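
For reference, here is a minimal usage sketch (not part of the patch; the
op name "example_runnable" is made up) showing how the helpers are meant to
degrade on kernels that predate the two commits above: the cgroup helper
evaluates to NULL and the dispatch helpers evaluate to false or no-ops, so
callers must tolerate the kfuncs being absent.

/*
 * Hypothetical snippet assuming <scx/common.bpf.h> and the updated
 * <scx/compat.bpf.h> are included. On kernels without 819513666966
 * ("sched_ext: Add cgroup support"), __COMPAT_scx_bpf_task_cgroup()
 * evaluates to NULL instead of calling the missing kfunc.
 */
void BPF_STRUCT_OPS(example_runnable, struct task_struct *p, u64 enq_flags)
{
	struct cgroup *cgrp = __COMPAT_scx_bpf_task_cgroup(p);

	if (!cgrp)	/* no cgroup support on this kernel, skip accounting */
		return;

	/* per-cgroup accounting would go here */

	bpf_cgroup_release(cgrp);
}

The dispatch helpers follow the same pattern: the set_slice/set_vtime
variants become no-ops and __COMPAT_scx_bpf_dispatch[_vtime]_from_dsq()
returns false when the kfuncs are missing, so a scheduler should keep a
regular dispatch path for that case.
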
 tools/sched_ext/include/scx/compat.bpf.h | 19 +++++++++++++++++++
 tools/sched_ext/scx_flatcg.bpf.c         | 10 +++++-----
 tools/sched_ext/scx_qmap.bpf.c           | 12 ++++++------
 3 files changed, 30 insertions(+), 11 deletions(-)

--- a/tools/sched_ext/include/scx/compat.bpf.h
+++ b/tools/sched_ext/include/scx/compat.bpf.h
@@ -15,6 +15,25 @@
__ret; \
})
+/* v6.12: 819513666966 ("sched_ext: Add cgroup support") */
+#define __COMPAT_scx_bpf_task_cgroup(p) \
+	(bpf_ksym_exists(scx_bpf_task_cgroup) ? \
+	 scx_bpf_task_cgroup((p)) : NULL)
+
+/* v6.12: 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()") */
+#define __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(it, slice) \
+	(bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice) ? \
+	 scx_bpf_dispatch_from_dsq_set_slice((it), (slice)) : (void)0)
+#define __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(it, vtime) \
+	(bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime) ? \
+	 scx_bpf_dispatch_from_dsq_set_vtime((it), (vtime)) : (void)0)
+#define __COMPAT_scx_bpf_dispatch_from_dsq(it, p, dsq_id, enq_flags) \
+	(bpf_ksym_exists(scx_bpf_dispatch_from_dsq) ? \
+	 scx_bpf_dispatch_from_dsq((it), (p), (dsq_id), (enq_flags)) : false)
+#define __COMPAT_scx_bpf_dispatch_vtime_from_dsq(it, p, dsq_id, enq_flags) \
+	(bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq) ? \
+	 scx_bpf_dispatch_vtime_from_dsq((it), (p), (dsq_id), (enq_flags)) : false)
+
/*
* Define sched_ext_ops. This may be expanded to define multiple variants for
* backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().
--- a/tools/sched_ext/scx_flatcg.bpf.c
+++ b/tools/sched_ext/scx_flatcg.bpf.c
@@ -383,7 +383,7 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct
return;
}
- cgrp = scx_bpf_task_cgroup(p);
+ cgrp = __COMPAT_scx_bpf_task_cgroup(p);
cgc = find_cgrp_ctx(cgrp);
if (!cgc)
goto out_release;
@@ -509,7 +509,7 @@ void BPF_STRUCT_OPS(fcg_runnable, struct
{
struct cgroup *cgrp;
- cgrp = scx_bpf_task_cgroup(p);
+ cgrp = __COMPAT_scx_bpf_task_cgroup(p);
update_active_weight_sums(cgrp, true);
bpf_cgroup_release(cgrp);
}
@@ -522,7 +522,7 @@ void BPF_STRUCT_OPS(fcg_running, struct
if (fifo_sched)
return;
- cgrp = scx_bpf_task_cgroup(p);
+ cgrp = __COMPAT_scx_bpf_task_cgroup(p);
cgc = find_cgrp_ctx(cgrp);
if (cgc) {
/*
@@ -565,7 +565,7 @@ void BPF_STRUCT_OPS(fcg_stopping, struct
if (!taskc->bypassed_at)
return;
- cgrp = scx_bpf_task_cgroup(p);
+ cgrp = __COMPAT_scx_bpf_task_cgroup(p);
cgc = find_cgrp_ctx(cgrp);
if (cgc) {
__sync_fetch_and_add(&cgc->cvtime_delta,
@@ -579,7 +579,7 @@ void BPF_STRUCT_OPS(fcg_quiescent, struc
{
struct cgroup *cgrp;
- cgrp = scx_bpf_task_cgroup(p);
+ cgrp = __COMPAT_scx_bpf_task_cgroup(p);
update_active_weight_sums(cgrp, false);
bpf_cgroup_release(cgrp);
}
--- a/tools/sched_ext/scx_qmap.bpf.c
+++ b/tools/sched_ext/scx_qmap.bpf.c
@@ -318,11 +318,11 @@ static bool dispatch_highpri(bool from_t
if (tctx->highpri) {
/* exercise the set_*() and vtime interface too */
- scx_bpf_dispatch_from_dsq_set_slice(
+ __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(
BPF_FOR_EACH_ITER, slice_ns * 2);
- scx_bpf_dispatch_from_dsq_set_vtime(
+ __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(
BPF_FOR_EACH_ITER, highpri_seq++);
- scx_bpf_dispatch_vtime_from_dsq(
+ __COMPAT_scx_bpf_dispatch_vtime_from_dsq(
BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0);
}
}
@@ -340,9 +340,9 @@ static bool dispatch_highpri(bool from_t
else
cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
- if (scx_bpf_dispatch_from_dsq(BPF_FOR_EACH_ITER, p,
- SCX_DSQ_LOCAL_ON | cpu,
- SCX_ENQ_PREEMPT)) {
+ if (__COMPAT_scx_bpf_dispatch_from_dsq(BPF_FOR_EACH_ITER, p,
+ SCX_DSQ_LOCAL_ON | cpu,
+ SCX_ENQ_PREEMPT)) {
if (cpu == this_cpu) {
dispatched = true;
__sync_fetch_and_add(&nr_expedited_local, 1);