[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250131070938.95551-8-changwoo@igalia.com>
Date: Fri, 31 Jan 2025 16:09:34 +0900
From: Changwoo Min <changwoo@...lia.com>
To: tj@...nel.org,
void@...ifault.com,
arighi@...dia.com
Cc: kernel-dev@...lia.com,
linux-kernel@...r.kernel.org,
Changwoo Min <changwoo@...lia.com>
Subject: [PATCH v3 07/11] sched_ext: Add an event, SCX_EV_BYPASS_DISPATCH
Add a core event, SCX_EV_BYPASS_DISPATCH, which counts how many
tasks have been dispatched while in bypass mode.
__scx_add_event() is used since the caller already holds an rq lock,
so preemption is already disabled.
Signed-off-by: Changwoo Min <changwoo@...lia.com>
---
kernel/sched/ext.c | 27 +++++++++++++++++++++++----
1 file changed, 23 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 07d54b52e971..236cdb0071eb 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1468,6 +1468,11 @@ struct scx_event_stats {
*/
u64 SCX_EV_ENQ_SKIP_EXITING;
+ /*
+ * The number of tasks dispatched in the bypassing mode.
+ */
+ u64 SCX_EV_BYPASS_DISPATCH;
+
/*
* The number of times the bypassing mode has been activated.
*/
@@ -2869,11 +2874,17 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
}
/* if there already are tasks to run, nothing to do */
- if (rq->scx.local_dsq.nr)
+ if (rq->scx.local_dsq.nr) {
+ if (scx_rq_bypassing(rq))
+ __scx_add_event(SCX_EV_BYPASS_DISPATCH, 1);
goto has_tasks;
+ }
- if (consume_global_dsq(rq))
+ if (consume_global_dsq(rq)) {
+ if (scx_rq_bypassing(rq))
+ __scx_add_event(SCX_EV_BYPASS_DISPATCH, 1);
goto has_tasks;
+ }
if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
goto no_tasks;
@@ -2899,10 +2910,16 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
rq->scx.flags |= SCX_RQ_BAL_KEEP;
goto has_tasks;
}
- if (rq->scx.local_dsq.nr)
+ if (rq->scx.local_dsq.nr) {
+ if (scx_rq_bypassing(rq))
+ __scx_add_event(SCX_EV_BYPASS_DISPATCH, 1);
goto has_tasks;
- if (consume_global_dsq(rq))
+ }
+ if (consume_global_dsq(rq)) {
+ if (scx_rq_bypassing(rq))
+ __scx_add_event(SCX_EV_BYPASS_DISPATCH, 1);
goto has_tasks;
+ }
/*
* ops.dispatch() can trap us in this loop by repeatedly
@@ -5001,6 +5018,7 @@ static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST);
scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING);
+ scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH);
scx_dump_event(s, &events, SCX_EV_BYPASS_ACTIVATE);
if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
@@ -7138,6 +7156,7 @@ __bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
scx_agg_event(&e_sys, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
scx_agg_event(&e_sys, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
scx_agg_event(&e_sys, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
+ scx_agg_event(&e_sys, e_cpu, SCX_EV_BYPASS_DISPATCH);
scx_agg_event(&e_sys, e_cpu, SCX_EV_BYPASS_ACTIVATE);
}
--
2.48.1
Powered by blists - more mailing lists