Message-ID: <20250919140954.104920-18-gmonaco@redhat.com>
Date: Fri, 19 Sep 2025 16:09:51 +0200
From: Gabriele Monaco <gmonaco@...hat.com>
To: linux-kernel@...r.kernel.org,
Steven Rostedt <rostedt@...dmis.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
linux-trace-kernel@...r.kernel.org
Cc: Gabriele Monaco <gmonaco@...hat.com>,
Nam Cao <namcao@...utronix.de>,
Tomas Glozar <tglozar@...hat.com>,
Juri Lelli <jlelli@...hat.com>,
Clark Williams <williams@...hat.com>,
John Kacur <jkacur@...hat.com>
Subject: [PATCH v2 17/20] sched: Add deadline tracepoints

Add the following tracepoints:

* sched_dl_throttle(dl, cpu):
    Called when a deadline entity is throttled
* sched_dl_replenish(dl, cpu):
    Called when a deadline entity's runtime is replenished
* sched_dl_server_start(dl, cpu):
    Called when a deadline server is started
* sched_dl_server_stop(dl, cpu, hard):
    Called when a deadline server is stopped (hard) or put to idle
    waiting for the next period (!hard)

These tracepoints can be used to validate the deadline scheduler with
RV and are not exported to tracefs.
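
As a rough illustration (not part of this patch), a kernel module such
as an RV monitor could attach a probe to one of these raw tracepoints
as sketched below. The module and probe names are made up for the
example; the register/unregister names assume the _tp suffix that
DECLARE_TRACE() appends, matching the symbols exported in this patch.
The probe receives the registration-time data pointer followed by the
TP_PROTO arguments:

#include <linux/module.h>
#include <trace/events/sched.h>

/* Probe signature: private data pointer, then the TP_PROTO arguments. */
static void probe_dl_throttle(void *data, struct sched_dl_entity *dl,
			      int cpu)
{
	pr_debug("deadline entity throttled on CPU %d\n", cpu);
}

static int __init dl_probe_init(void)
{
	/* Hook the raw tracepoint; it is not visible in tracefs. */
	return register_trace_sched_dl_throttle_tp(probe_dl_throttle, NULL);
}

static void __exit dl_probe_exit(void)
{
	unregister_trace_sched_dl_throttle_tp(probe_dl_throttle, NULL);
	/* Wait for in-flight probes to finish before the module unloads. */
	tracepoint_synchronize_unregister();
}

module_init(dl_probe_init);
module_exit(dl_probe_exit);
MODULE_LICENSE("GPL");

Registering from a module only works because the tracepoints are
exported with EXPORT_TRACEPOINT_SYMBOL_GPL() below.
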
Signed-off-by: Gabriele Monaco <gmonaco@...hat.com>
---
 include/trace/events/sched.h | 16 ++++++++++++++++
 kernel/sched/core.c          |  4 ++++
 kernel/sched/deadline.c      |  9 +++++++++
 3 files changed, 29 insertions(+)

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 7b2645b50e78..6dc5cd3e9fc4 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -896,6 +896,22 @@ DECLARE_TRACE(sched_set_need_resched,
 	TP_PROTO(struct task_struct *tsk, int cpu, int tif),
 	TP_ARGS(tsk, cpu, tif));
 
+DECLARE_TRACE(sched_dl_throttle,
+	TP_PROTO(struct sched_dl_entity *dl, int cpu),
+	TP_ARGS(dl, cpu));
+
+DECLARE_TRACE(sched_dl_replenish,
+	TP_PROTO(struct sched_dl_entity *dl, int cpu),
+	TP_ARGS(dl, cpu));
+
+DECLARE_TRACE(sched_dl_server_start,
+	TP_PROTO(struct sched_dl_entity *dl, int cpu),
+	TP_ARGS(dl, cpu));
+
+DECLARE_TRACE(sched_dl_server_stop,
+	TP_PROTO(struct sched_dl_entity *dl, int cpu, bool hard),
+	TP_ARGS(dl, cpu, hard));
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 334ff5b214d7..b236fc762e73 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -120,6 +120,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_entry_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_exit_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_set_need_resched_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_throttle_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_replenish_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_server_start_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_server_stop_tp);
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index f25301267e47..187175607682 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -742,6 +742,7 @@ static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
 		dl_se->dl_throttled = 1;
 		dl_se->dl_defer_armed = 1;
 	}
+	trace_sched_dl_replenish_tp(dl_se, cpu_of(rq));
 }
 
 /*
@@ -852,6 +853,9 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se)
 	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
 		printk_deferred_once("sched: DL replenish lagged too much\n");
 		replenish_dl_new_period(dl_se, rq);
+	} else {
+		/* replenish_dl_new_period() already traces the replenish */
+		trace_sched_dl_replenish_tp(dl_se, cpu_of(rq));
 	}
 
 	if (dl_se->dl_yielded)
@@ -1349,6 +1353,7 @@ static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
 	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
 		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
 			return;
+		trace_sched_dl_throttle_tp(dl_se, cpu_of(rq));
 		dl_se->dl_throttled = 1;
 		if (dl_se->runtime > 0)
 			dl_se->runtime = 0;
@@ -1482,6 +1487,7 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
 
 throttle:
 	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
+		trace_sched_dl_throttle_tp(dl_se, cpu_of(rq));
 		dl_se->dl_throttled = 1;
 
 		/* If requested, inform the user about runtime overruns. */
@@ -1592,6 +1598,7 @@ void dl_server_start(struct sched_dl_entity *dl_se)
 	if (!dl_server(dl_se) || dl_se->dl_server_active)
 		return;
 
+	trace_sched_dl_server_start_tp(dl_se, cpu_of(rq));
 	dl_se->dl_server_active = 1;
 	enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
 	if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
@@ -1603,6 +1610,7 @@ void dl_server_stop(struct sched_dl_entity *dl_se)
 	if (!dl_server(dl_se) || !dl_server_active(dl_se))
 		return;
 
+	trace_sched_dl_server_stop_tp(dl_se, cpu_of(dl_se->rq), true);
 	dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
 	hrtimer_try_to_cancel(&dl_se->dl_timer);
 	dl_se->dl_defer_armed = 0;
@@ -1620,6 +1628,7 @@ static bool dl_server_stopped(struct sched_dl_entity *dl_se)
 		return true;
 	}
 
+	trace_sched_dl_server_stop_tp(dl_se, cpu_of(dl_se->rq), false);
 	dl_se->dl_server_idle = 1;
 	return false;
 }
--
2.51.0