[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260204160710.1475802-3-arighi@nvidia.com>
Date: Wed, 4 Feb 2026 17:05:59 +0100
From: Andrea Righi <arighi@...dia.com>
To: Tejun Heo <tj@...nel.org>,
David Vernet <void@...ifault.com>,
Changwoo Min <changwoo@...lia.com>
Cc: Kuba Piecuch <jpiecuch@...gle.com>,
Emil Tsalapatis <emil@...alapatis.com>,
Christian Loehle <christian.loehle@....com>,
Daniel Hodges <hodgesd@...a.com>,
sched-ext@...ts.linux.dev,
linux-kernel@...r.kernel.org
Subject: [PATCH 2/2] selftests/sched_ext: Add test to validate ops.dequeue() semantics
Add a new kselftest to validate the following scenarios:
- scenario 0 (Local DSQ): tasks dispatched to local DSQs bypass BPF
scheduler entirely, they never enter BPF custody, so no
ops.dequeue() should be called,
- scenario 1 (Global DSQ): tasks dispatched to SCX_DSQ_GLOBAL also
bypass BPF scheduler, like local DSQs, no ops.dequeue() should be
called,
- scenario 2 (User DSQ): tasks enter BPF scheduler custody with full
enqueue/dequeue lifecycle tracking and state machine validation
(expects 1:1 enqueue/dequeue pairing).
The test validates that:
- terminal DSQ dispatch (local, global) don't trigger ops.dequeue(),
- user DSQ dispatch has exact 1:1 ops.enqueue()/dequeue() pairing,
- dispatch dequeues have no flags (normal workflow),
- property change dequeues have the %SCX_DEQ_SCHED_CHANGE flag set,
- no duplicate enqueues or invalid state transitions are happening.
This validates that the new ops.dequeue() semantics work correctly for
all task lifecycle scenarios, including the distinction between terminal
DSQs (where BPF scheduler is done with the task) and user DSQs (where
BPF scheduler manages the task lifecycle).
Cc: Tejun Heo <tj@...nel.org>
Cc: Emil Tsalapatis <emil@...alapatis.com>
Cc: Kuba Piecuch <jpiecuch@...gle.com>
Signed-off-by: Andrea Righi <arighi@...dia.com>
---
tools/testing/selftests/sched_ext/Makefile | 1 +
.../testing/selftests/sched_ext/dequeue.bpf.c | 269 ++++++++++++++++++
tools/testing/selftests/sched_ext/dequeue.c | 207 ++++++++++++++
3 files changed, 477 insertions(+)
create mode 100644 tools/testing/selftests/sched_ext/dequeue.bpf.c
create mode 100644 tools/testing/selftests/sched_ext/dequeue.c
diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile
index 5fe45f9c5f8fd..764e91edabf93 100644
--- a/tools/testing/selftests/sched_ext/Makefile
+++ b/tools/testing/selftests/sched_ext/Makefile
@@ -161,6 +161,7 @@ all_test_bpfprogs := $(foreach prog,$(wildcard *.bpf.c),$(INCLUDE_DIR)/$(patsubs
auto-test-targets := \
create_dsq \
+ dequeue \
enq_last_no_enq_fails \
ddsp_bogus_dsq_fail \
ddsp_vtimelocal_fail \
diff --git a/tools/testing/selftests/sched_ext/dequeue.bpf.c b/tools/testing/selftests/sched_ext/dequeue.bpf.c
new file mode 100644
index 0000000000000..5d736ffadb4c8
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/dequeue.bpf.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A scheduler that validates ops.dequeue() is called correctly:
+ * - Tasks dispatched to terminal DSQs (local, global) bypass the BPF
+ * scheduler entirely: no ops.dequeue() should be called
+ * - Tasks dispatched to user DSQs enter BPF custody: ops.dequeue() must be
+ * called when they leave custody
+ * - Every ops.enqueue() for non-terminal DSQs is followed by exactly one
+ * ops.dequeue() (validate 1:1 pairing and state machine)
+ *
+ * Copyright (c) 2026 NVIDIA Corporation.
+ */
+
+#include <scx/common.bpf.h>
+
+#define SHARED_DSQ 0
+
+char _license[] SEC("license") = "GPL";
+
+UEI_DEFINE(uei);
+
+/*
+ * Counters to track the lifecycle of tasks:
+ * - enqueue_cnt: Number of times ops.enqueue() was called
+ * - dequeue_cnt: Number of times ops.dequeue() was called (any type)
+ * - dispatch_dequeue_cnt: Number of regular dispatch dequeues (no flag)
+ * - change_dequeue_cnt: Number of property change dequeues
+ */
+u64 enqueue_cnt, dequeue_cnt, dispatch_dequeue_cnt, change_dequeue_cnt;
+
+/*
+ * Test scenarios:
+ * 0) Dispatch to local DSQ (terminal DSQ, bypasses BPF scheduler, no
+ * dequeue callbacks)
+ * 1) Dispatch to global DSQ (terminal DSQ, bypasses BPF scheduler, no
+ * dequeue callbacks)
+ * 2) Dispatch to shared user DSQ (enters BPF scheduler, dequeue callbacks
+ * expected)
+ */
+u32 test_scenario;
+
+/*
+ * Per-task state to track lifecycle and validate workflow semantics.
+ * State transitions:
+ * NONE -> ENQUEUED (on enqueue)
+ * ENQUEUED -> DISPATCHED (on dispatch dequeue)
+ * DISPATCHED -> NONE (on property change dequeue or re-enqueue)
+ * ENQUEUED -> NONE (on property change dequeue before dispatch)
+ */
+enum task_state {
+ TASK_NONE = 0,
+ TASK_ENQUEUED,
+ TASK_DISPATCHED,
+};
+
+struct task_ctx {
+ enum task_state state; /* Current state in the workflow */
+ u64 enqueue_seq; /* Sequence number for debugging */
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct task_ctx);
+} task_ctx_stor SEC(".maps");
+
+static struct task_ctx *try_lookup_task_ctx(struct task_struct *p)
+{
+ return bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
+}
+
+s32 BPF_STRUCT_OPS(dequeue_select_cpu, struct task_struct *p,
+ s32 prev_cpu, u64 wake_flags)
+{
+ /* Always bounce to ops.enqueue() */
+ return prev_cpu;
+}
+
+void BPF_STRUCT_OPS(dequeue_enqueue, struct task_struct *p, u64 enq_flags)
+{
+ struct task_ctx *tctx;
+
+ tctx = try_lookup_task_ctx(p);
+ if (!tctx)
+ return;
+
+ switch (test_scenario) {
+ case 0:
+ /*
+ * Scenario 0: Direct dispatch to the local DSQ.
+ *
+ * Task bypasses BPF scheduler entirely: no enqueue
+ * tracking, no dequeue callbacks. Don't increment counters
+ * or validate state since the task never enters BPF
+ * scheduler management.
+ */
+ scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags);
+ break;
+
+ case 1:
+ /*
+ * Scenario 1: Direct dispatch to the global DSQ.
+ *
+ * Like scenario 0, task bypasses BPF scheduler entirely.
+ * SCX_DSQ_GLOBAL is a terminal DSQ, tasks dispatched to it
+ * leave BPF custody immediately, so no dequeue callbacks
+ * should be triggered.
+ */
+ scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+ break;
+
+ case 2:
+ /*
+ * Scenario 2: Dispatch to shared user DSQ.
+ *
+ * Task enters BPF scheduler management: track
+ * enqueue/dequeue lifecycle and validate state
+ * transitions.
+ */
+ __sync_fetch_and_add(&enqueue_cnt, 1);
+
+ /*
+ * Validate state transition: enqueue is only valid from
+ * NONE or DISPATCHED states. Getting enqueue while in
+ * ENQUEUED state indicates a missing dequeue.
+ */
+ if (tctx->state == TASK_ENQUEUED)
+ scx_bpf_error("%d (%s): enqueue while in ENQUEUED state seq=%llu",
+ p->pid, p->comm, tctx->enqueue_seq);
+
+ /* Transition to ENQUEUED state */
+ tctx->state = TASK_ENQUEUED;
+ tctx->enqueue_seq++;
+
+ scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
+ break;
+ }
+}
+
+void BPF_STRUCT_OPS(dequeue_dequeue, struct task_struct *p, u64 deq_flags)
+{
+ struct task_ctx *tctx;
+
+ __sync_fetch_and_add(&dequeue_cnt, 1);
+
+ tctx = try_lookup_task_ctx(p);
+ if (!tctx)
+ return;
+
+ /*
+ * For scenarios 0 and 1 (terminal DSQs: local and global),
+ * ops.dequeue() should never be called because tasks bypass the
+ * BPF scheduler entirely. If we get here, it's a kernel bug. We
+ * don't track enqueues for these scenarios, so tctx->enqueue_seq
+ * will be 0.
+ */
+ if (test_scenario == 0) {
+ scx_bpf_error("%d (%s): dequeue called for local DSQ scenario - kernel bug!",
+ p->pid, p->comm);
+ return;
+ }
+ if (test_scenario == 1) {
+ scx_bpf_error("%d (%s): dequeue called for global DSQ scenario - kernel bug!",
+ p->pid, p->comm);
+ return;
+ }
+
+ /*
+ * Validate state: dequeue should only happen from ENQUEUED or
+ * DISPATCHED states. Getting dequeue from NONE indicates a bug.
+ */
+ if (tctx->state == TASK_NONE) {
+ scx_bpf_error("%d (%s): dequeue from NONE state seq=%llu",
+ p->pid, p->comm, tctx->enqueue_seq);
+ return;
+ }
+
+ if (deq_flags & SCX_DEQ_SCHED_CHANGE) {
+ /*
+ * Property change interrupting the workflow. Valid from
+ * both ENQUEUED and DISPATCHED states. Transitions task
+ * back to NONE state.
+ */
+ __sync_fetch_and_add(&change_dequeue_cnt, 1);
+
+ /* Validate state transition */
+ if (tctx->state != TASK_ENQUEUED && tctx->state != TASK_DISPATCHED)
+ scx_bpf_error("%d (%s): invalid property change dequeue state=%d seq=%llu",
+ p->pid, p->comm, tctx->state, tctx->enqueue_seq);
+
+ /* Transition back to NONE: task outside scheduler control */
+ tctx->state = TASK_NONE;
+ } else {
+ /*
+ * Regular dispatch dequeue: normal workflow step. Valid
+ * only from ENQUEUED state (after enqueue, before dispatch
+ * dequeue). Transitions to DISPATCHED state.
+ */
+ __sync_fetch_and_add(&dispatch_dequeue_cnt, 1);
+
+ /*
+ * Dispatch dequeue should not have %SCX_DEQ_SCHED_CHANGE
+ * flag.
+ */
+ if (deq_flags & SCX_DEQ_SCHED_CHANGE)
+ scx_bpf_error("%d (%s): SCX_DEQ_SCHED_CHANGE in dispatch dequeue seq=%llu",
+ p->pid, p->comm, tctx->enqueue_seq);
+
+ /*
+ * Must be in ENQUEUED state.
+ */
+ if (tctx->state != TASK_ENQUEUED)
+ scx_bpf_error("%d (%s): dispatch dequeue from state %d seq=%llu",
+ p->pid, p->comm, tctx->state, tctx->enqueue_seq);
+
+ /*
+ * Transition to DISPATCHED: normal cycle completed
+ * dispatch.
+ */
+ tctx->state = TASK_DISPATCHED;
+ }
+}
+
+void BPF_STRUCT_OPS(dequeue_dispatch, s32 cpu, struct task_struct *prev)
+{
+ scx_bpf_dsq_move_to_local(SHARED_DSQ);
+}
+
+s32 BPF_STRUCT_OPS(dequeue_init_task, struct task_struct *p,
+ struct scx_init_task_args *args)
+{
+ struct task_ctx *tctx;
+
+ tctx = bpf_task_storage_get(&task_ctx_stor, p, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!tctx)
+ return -ENOMEM;
+
+ return 0;
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(dequeue_init)
+{
+ s32 ret;
+
+ ret = scx_bpf_create_dsq(SHARED_DSQ, -1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void BPF_STRUCT_OPS(dequeue_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops dequeue_ops = {
+ .select_cpu = (void *)dequeue_select_cpu,
+ .enqueue = (void *)dequeue_enqueue,
+ .dequeue = (void *)dequeue_dequeue,
+ .dispatch = (void *)dequeue_dispatch,
+ .init_task = (void *)dequeue_init_task,
+ .init = (void *)dequeue_init,
+ .exit = (void *)dequeue_exit,
+ .name = "dequeue_test",
+};
diff --git a/tools/testing/selftests/sched_ext/dequeue.c b/tools/testing/selftests/sched_ext/dequeue.c
new file mode 100644
index 0000000000000..0ce59299a1b37
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/dequeue.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025 NVIDIA Corporation.
+ */
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <unistd.h>
+#include <signal.h>
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <sched.h>
+#include <pthread.h>
+#include "scx_test.h"
+#include "dequeue.bpf.skel.h"
+
+#define NUM_WORKERS 8
+
+/*
+ * Worker function that creates enqueue/dequeue events. It alternates
+ * between CPU work, sleeping, and affinity changes to trigger dequeues.
+ */
+static void worker_fn(int id)
+{
+ cpu_set_t cpuset;
+ int i;
+ volatile int sum = 0;
+
+ for (i = 0; i < 1000; i++) {
+ int j;
+
+ /* Do some work to trigger scheduling events */
+ for (j = 0; j < 10000; j++)
+ sum += j;
+
+ /* Change affinity to trigger dequeue */
+ if (i % 10 == 0) {
+ CPU_ZERO(&cpuset);
+ /* Rotate through the first 4 CPUs */
+ CPU_SET(i % 4, &cpuset);
+ sched_setaffinity(0, sizeof(cpuset), &cpuset);
+ }
+
+ /* Do additional work */
+ for (j = 0; j < 10000; j++)
+ sum += j;
+
+ /* Sleep to trigger dequeue */
+ usleep(1000 + (id * 100));
+ }
+
+ exit(0);
+}
+
+static enum scx_test_status run_scenario(struct dequeue *skel, u32 scenario,
+ const char *scenario_name)
+{
+ struct bpf_link *link;
+ pid_t pids[NUM_WORKERS];
+ int i, status;
+ u64 enq_start, deq_start, dispatch_deq_start, change_deq_start;
+ u64 enq_delta, deq_delta, dispatch_deq_delta, change_deq_delta;
+
+ /* Set the test scenario */
+ skel->bss->test_scenario = scenario;
+
+ /* Record starting counts */
+ enq_start = skel->bss->enqueue_cnt;
+ deq_start = skel->bss->dequeue_cnt;
+ dispatch_deq_start = skel->bss->dispatch_dequeue_cnt;
+ change_deq_start = skel->bss->change_dequeue_cnt;
+
+ link = bpf_map__attach_struct_ops(skel->maps.dequeue_ops);
+ SCX_FAIL_IF(!link, "Failed to attach struct_ops for scenario %s", scenario_name);
+
+ /* Fork worker processes to generate enqueue/dequeue events */
+ for (i = 0; i < NUM_WORKERS; i++) {
+ pids[i] = fork();
+ SCX_FAIL_IF(pids[i] < 0, "Failed to fork worker %d", i);
+
+ if (pids[i] == 0) {
+ worker_fn(i);
+ /* Should not reach here */
+ exit(1);
+ }
+ }
+
+ /* Wait for all workers to complete */
+ for (i = 0; i < NUM_WORKERS; i++) {
+ SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
+ "Failed to wait for worker %d", i);
+ SCX_FAIL_IF(status != 0, "Worker %d exited with status %d", i, status);
+ }
+
+ bpf_link__destroy(link);
+
+ SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_UNREG));
+
+ /* Calculate deltas */
+ enq_delta = skel->bss->enqueue_cnt - enq_start;
+ deq_delta = skel->bss->dequeue_cnt - deq_start;
+ dispatch_deq_delta = skel->bss->dispatch_dequeue_cnt - dispatch_deq_start;
+ change_deq_delta = skel->bss->change_dequeue_cnt - change_deq_start;
+
+ printf("%s:\n", scenario_name);
+ printf(" enqueues: %lu\n", (unsigned long)enq_delta);
+ printf(" dequeues: %lu (dispatch: %lu, property_change: %lu)\n",
+ (unsigned long)deq_delta,
+ (unsigned long)dispatch_deq_delta,
+ (unsigned long)change_deq_delta);
+
+ /*
+ * Validate enqueue/dequeue lifecycle tracking.
+ *
+ * For scenarios 0 and 1 (local and global DSQs), both enqueues and
+ * dequeues should be 0 because tasks bypass the BPF scheduler
+ * entirely: tasks never enter BPF scheduler's custody.
+ *
+ * For scenario 2 (user DSQ), we expect both enqueues and dequeues.
+ *
+ * The BPF code does strict state machine validation with
+ * scx_bpf_error() to ensure the workflow semantics are correct. If
+ * we reach this point without errors, the semantics are validated
+ * correctly.
+ */
+ if (scenario == 0 || scenario == 1) {
+ /* Terminal DSQs: tasks bypass BPF scheduler completely */
+ SCX_EQ(enq_delta, 0);
+ SCX_EQ(deq_delta, 0);
+ SCX_EQ(dispatch_deq_delta, 0);
+ SCX_EQ(change_deq_delta, 0);
+ } else {
+ /* User DSQ: tasks enter BPF scheduler's custody */
+ SCX_GT(enq_delta, 0);
+ SCX_GT(deq_delta, 0);
+ /* Validate 1:1 enqueue/dequeue pairing */
+ SCX_EQ(enq_delta, deq_delta);
+ }
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct dequeue *skel;
+
+ skel = dequeue__open();
+ SCX_FAIL_IF(!skel, "Failed to open skel");
+ SCX_ENUM_INIT(skel);
+ SCX_FAIL_IF(dequeue__load(skel), "Failed to load skel");
+
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct dequeue *skel = ctx;
+ enum scx_test_status status;
+
+ status = run_scenario(skel, 0, "Local DSQ (terminal, direct dispatch)");
+ if (status != SCX_TEST_PASS)
+ return status;
+
+ status = run_scenario(skel, 1, "Global DSQ (terminal, SCX_DSQ_GLOBAL)");
+ if (status != SCX_TEST_PASS)
+ return status;
+
+ status = run_scenario(skel, 2, "User DSQ (non-terminal, BPF custody)");
+ if (status != SCX_TEST_PASS)
+ return status;
+
+ printf("\n=== Summary ===\n");
+ printf("Total enqueues: %lu\n", (unsigned long)skel->bss->enqueue_cnt);
+ printf("Total dequeues: %lu\n", (unsigned long)skel->bss->dequeue_cnt);
+ printf(" Dispatch dequeues: %lu (no flag, normal workflow)\n",
+ (unsigned long)skel->bss->dispatch_dequeue_cnt);
+ printf(" Property change dequeues: %lu (SCX_DEQ_SCHED_CHANGE flag)\n",
+ (unsigned long)skel->bss->change_dequeue_cnt);
+ printf("\nAll scenarios passed - no state machine violations detected\n");
+ printf("-> Validated: Local DSQ dispatch bypasses BPF scheduler\n");
+ printf("-> Validated: Global DSQ dispatch bypasses BPF scheduler\n");
+ printf("-> Validated: User DSQ dispatch triggers dequeue callbacks\n");
+ printf("-> Validated: Dispatch dequeues have no flags (normal workflow)\n");
+ printf("-> Validated: Property change dequeues have SCX_DEQ_SCHED_CHANGE flag\n");
+ printf("-> Validated: No duplicate enqueues or invalid state transitions\n");
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct dequeue *skel = ctx;
+
+ dequeue__destroy(skel);
+}
+
+struct scx_test dequeue_test = {
+ .name = "dequeue",
+ .description = "Verify ops.dequeue() semantics",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+
+REGISTER_SCX_TEST(&dequeue_test)
--
2.53.0
Powered by blists - more mailing lists