Message-ID: <20250131070938.95551-4-changwoo@igalia.com>
Date: Fri, 31 Jan 2025 16:09:30 +0900
From: Changwoo Min <changwoo@...lia.com>
To: tj@...nel.org,
void@...ifault.com,
arighi@...dia.com
Cc: kernel-dev@...lia.com,
linux-kernel@...r.kernel.org,
Changwoo Min <changwoo@...lia.com>
Subject: [PATCH v3 03/11] sched_ext: Add an event, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE
Add a core event, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, which counts how
many times a BPF scheduler tries to dispatch to an offlined local DSQ.

__scx_add_event() is used since the caller holds an rq lock, so
preemption is already disabled.
Signed-off-by: Changwoo Min <changwoo@...lia.com>
---
kernel/sched/ext.c | 9 +++++++++
1 file changed, 9 insertions(+)
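Note (not part of the patch): a minimal sketch of how the per-CPU event
counters are assumed to be bumped. struct scx_event_stats and the
per-CPU variable event_stats_cpu appear in the hunks below; the exact
macro bodies here are assumptions. scx_add_event() would go through
this_cpu_add(), which handles preemption safety itself, while
__scx_add_event() would use __this_cpu_add() and rely on the caller
already running with preemption disabled, e.g. under an rq lock as in
dispatch_to_local_dsq().

/* Illustrative sketch only; not the actual macro definitions. */
static DEFINE_PER_CPU(struct scx_event_stats, event_stats_cpu);

/* Safe in any context: this_cpu_add() takes care of preemption. */
#define scx_add_event(name, cnt) do {					\
	this_cpu_add(event_stats_cpu.name, (cnt));			\
} while (0)

/* Caller must already have preemption disabled (e.g. holds an rq lock). */
#define __scx_add_event(name, cnt) do {					\
	__this_cpu_add(event_stats_cpu.name, (cnt));			\
} while (0)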
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 4d3b32aca48d..041b0af3551a 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1449,6 +1449,12 @@ struct scx_event_stats {
* the core scheduler code silently picks a fallback CPU.
*/
u64 SCX_EV_SELECT_CPU_FALLBACK;
+
+ /*
+ * When dispatching to a local DSQ, the CPU may have gone offline in
+ * the meantime. In this case, the task is bounced to the global DSQ.
+ */
+ u64 SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE;
};
/*
@@ -2644,6 +2650,7 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
dispatch_enqueue(find_global_dsq(p), p,
enq_flags | SCX_ENQ_CLEAR_OPSS);
+ __scx_add_event(SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1);
return;
}
@@ -4970,6 +4977,7 @@ static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
scx_bpf_events(&events, sizeof(events));
scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK);
+ scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
memcpy(ei->dump + dump_len - sizeof(trunc_marker),
@@ -7104,6 +7112,7 @@ __bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
for_each_possible_cpu(cpu) {
e_cpu = per_cpu_ptr(&event_stats_cpu, cpu);
scx_agg_event(&e_sys, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
+ scx_agg_event(&e_sys, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
}
/*
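A usage sketch (not part of the patch): a BPF scheduler could read the
new counter through the scx_bpf_events() kfunc, following the same
pattern as the scx_dump_state() hunk above. The second parameter name
is not visible in the truncated hunk, so the call below simply passes
the buffer size.

/* BPF-side sketch: report how often a dispatch bounced off an offline CPU. */
struct scx_event_stats events;

scx_bpf_events(&events, sizeof(events));
bpf_printk("offline local DSQ bounces: %llu",
	   events.SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);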
--
2.48.1