Message-ID: <20250814150809.140739-15-gmonaco@redhat.com>
Date: Thu, 14 Aug 2025 17:08:06 +0200
From: Gabriele Monaco <gmonaco@...hat.com>
To: linux-kernel@...r.kernel.org,
Steven Rostedt <rostedt@...dmis.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
linux-trace-kernel@...r.kernel.org
Cc: Gabriele Monaco <gmonaco@...hat.com>,
Nam Cao <namcao@...utronix.de>,
Tomas Glozar <tglozar@...hat.com>,
Juri Lelli <jlelli@...hat.com>,
Clark Williams <williams@...hat.com>,
John Kacur <jkacur@...hat.com>
Subject: [RFC PATCH 14/17] sched: Add deadline tracepoints

Add the following tracepoints:

* sched_dl_throttle(dl):
    Called when a deadline entity is throttled
* sched_dl_replenish(dl):
    Called when a deadline entity's runtime is replenished
* sched_dl_server_start(dl):
    Called when a deadline server is started
* sched_dl_server_stop(dl, hard):
    Called when a deadline server is stopped (hard) or put to idle,
    waiting for the next period (!hard)

These tracepoints can be used to validate the deadline scheduler with
RV and are not exported to tracefs; an illustrative consumer sketch
follows below the --- marker.

Signed-off-by: Gabriele Monaco <gmonaco@...hat.com>
---
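Not part of the change itself, only for reviewers: a minimal sketch of
how a consumer (e.g. an RV monitor) could attach to one of these raw
tracepoints. The probe and module names here are made up, and the
sketch assumes the _tp-suffixed registration helpers that
DECLARE_TRACE() generates; an out-of-tree module would additionally
need the tracepoint exported via EXPORT_TRACEPOINT_SYMBOL_GPL(), which
this patch does not do.

#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

/* The first argument is the cookie passed at registration time. */
static void probe_dl_throttle(void *data, struct sched_dl_entity *dl)
{
	/* Feed the event into an RV monitor or any other consumer. */
	pr_debug("dl throttle: runtime=%lld deadline=%llu\n",
		 dl->runtime, dl->deadline);
}

static int __init dl_probe_init(void)
{
	return register_trace_sched_dl_throttle_tp(probe_dl_throttle, NULL);
}

static void __exit dl_probe_exit(void)
{
	unregister_trace_sched_dl_throttle_tp(probe_dl_throttle, NULL);
	/* Wait for in-flight probes before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(dl_probe_init);
module_exit(dl_probe_exit);
MODULE_LICENSE("GPL");
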
 include/trace/events/sched.h | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/deadline.c      |  8 ++++++++
 2 files changed, 62 insertions(+)

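Equally illustrative: the hard flag of sched_dl_server_stop tells a
full stop apart from the server merely going idle until its next
period, so a probe for this two-argument tracepoint (again a
hypothetical name, registered via
register_trace_sched_dl_server_stop_tp()) would look like:

static void probe_dl_server_stop(void *data, struct sched_dl_entity *dl,
				 bool hard)
{
	if (hard)
		pr_debug("dl server stopped\n");	/* dl_server_stop() */
	else
		pr_debug("dl server went idle\n");	/* dl_server_stopped() */
}
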
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 7b2645b50e78..f34cc1dc4a13 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -609,6 +609,44 @@ TRACE_EVENT(sched_pi_setprio,
__entry->oldprio, __entry->newprio)
);
+/*
+DECLARE_EVENT_CLASS(sched_dl_template,
+
+ TP_PROTO(struct sched_dl_entity *dl),
+
+ TP_ARGS(dl),
+
+ TP_STRUCT__entry(
+ __string( comm, dl->dl_server ? "server" : container_of(dl, struct task_struct, dl)->comm )
+ __field( pid_t, pid )
+ __field( s64, runtime )
+ __field( u64, deadline )
+ __field( int, dl_yielded )
+ ),
+
+ TP_fast_assign(
+ __assign_str(comm);
+ __entry->pid = dl->dl_server ? -1 : container_of(dl, struct task_struct, dl)->pid;
+ __entry->runtime = dl->runtime;
+ __entry->deadline = dl->deadline;
+ __entry->dl_yielded = dl->dl_yielded;
+ ),
+
+ TP_printk("comm=%s pid=%d runtime=%lld deadline=%llu yielded=%d",
+ __get_str(comm), __entry->pid,
+ __entry->runtime, __entry->deadline,
+ __entry->dl_yielded)
+);
+
+DEFINE_EVENT(sched_dl_template, sched_dl_throttle,
+ TP_PROTO(struct sched_dl_entity *dl),
+ TP_ARGS(dl));
+
+DEFINE_EVENT(sched_dl_template, sched_dl_replenish,
+ TP_PROTO(struct sched_dl_entity *dl),
+ TP_ARGS(dl));
+*/
+
#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
TP_PROTO(struct task_struct *tsk),
@@ -896,6 +934,22 @@ DECLARE_TRACE(sched_set_need_resched,
TP_PROTO(struct task_struct *tsk, int cpu, int tif),
TP_ARGS(tsk, cpu, tif));
+DECLARE_TRACE(sched_dl_throttle,
+ TP_PROTO(struct sched_dl_entity *dl),
+ TP_ARGS(dl));
+
+DECLARE_TRACE(sched_dl_replenish,
+ TP_PROTO(struct sched_dl_entity *dl),
+ TP_ARGS(dl));
+
+DECLARE_TRACE(sched_dl_server_start,
+ TP_PROTO(struct sched_dl_entity *dl),
+ TP_ARGS(dl));
+
+DECLARE_TRACE(sched_dl_server_stop,
+ TP_PROTO(struct sched_dl_entity *dl, bool hard),
+ TP_ARGS(dl, hard));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e2d51f4306b3..f8284accb6b4 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -742,6 +742,7 @@ static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
dl_se->dl_throttled = 1;
dl_se->dl_defer_armed = 1;
}
+ trace_sched_dl_replenish_tp(dl_se);
}
/*
@@ -852,6 +853,9 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se)
if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
printk_deferred_once("sched: DL replenish lagged too much\n");
replenish_dl_new_period(dl_se, rq);
+ } else {
+ /* replenish_dl_new_period() already traces this case */
+ trace_sched_dl_replenish_tp(dl_se);
}
if (dl_se->dl_yielded)
@@ -1482,6 +1486,7 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
throttle:
if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
+ trace_sched_dl_throttle_tp(dl_se);
dl_se->dl_throttled = 1;
/* If requested, inform the user about runtime overruns. */
@@ -1590,6 +1595,7 @@ void dl_server_start(struct sched_dl_entity *dl_se)
if (!dl_server(dl_se) || dl_se->dl_server_active)
return;
+ trace_sched_dl_server_start_tp(dl_se);
dl_se->dl_server_active = 1;
enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
@@ -1601,6 +1607,7 @@ void dl_server_stop(struct sched_dl_entity *dl_se)
if (!dl_server(dl_se) || !dl_server_active(dl_se))
return;
+ trace_sched_dl_server_stop_tp(dl_se, true);
dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
hrtimer_try_to_cancel(&dl_se->dl_timer);
dl_se->dl_defer_armed = 0;
@@ -1618,6 +1625,7 @@ static bool dl_server_stopped(struct sched_dl_entity *dl_se)
return true;
}
+ trace_sched_dl_server_stop_tp(dl_se, false);
dl_se->dl_server_idle = 1;
return false;
}
--
2.50.1