[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20190510113013.1193-6-qais.yousef@arm.com>
Date: Fri, 10 May 2019 12:30:11 +0100
From: Qais Yousef <qais.yousef@....com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Steven Rostedt <rostedt@...dmis.org>
Cc: linux-kernel@...r.kernel.org,
Pavankumar Kondeti <pkondeti@...eaurora.org>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Uwe Kleine-Konig <u.kleine-koenig@...gutronix.de>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Quentin Perret <quentin.perret@....com>,
Qais Yousef <qais.yousef@....com>
Subject: [PATCH v2 5/7] sched: Add pelt_se tracepoint
The new tracepoint allows tracking PELT signals at the sched_entity level,
which is supported for CFS tasks and task groups only.
Signed-off-by: Qais Yousef <qais.yousef@....com>
---
include/trace/events/sched.h | 4 ++++
kernel/sched/fair.c | 1 +
kernel/sched/pelt.c | 2 ++
kernel/sched/sched_tracepoints.h | 13 +++++++++++++
4 files changed, 20 insertions(+)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 50346098e026..cbcb47972232 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -596,6 +596,10 @@ DECLARE_TRACE(pelt_rq,
TP_PROTO(int cpu, const char *path, struct sched_avg *avg),
TP_ARGS(cpu, path, avg));
+DECLARE_TRACE(pelt_se,
+ TP_PROTO(int cpu, const char *path, struct sched_entity *se),
+ TP_ARGS(cpu, path, se));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 34782e37387c..81036c34fd29 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3139,6 +3139,7 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
sched_trace_pelt_cfs_rq(cfs_rq);
+ sched_trace_pelt_se(se);
return 1;
}
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 39418e80699f..75eea3b61a97 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -266,6 +266,7 @@ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+ sched_trace_pelt_se(se);
return 1;
}
@@ -279,6 +280,7 @@ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se
___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
cfs_se_util_change(&se->avg);
+ sched_trace_pelt_se(se);
return 1;
}
diff --git a/kernel/sched/sched_tracepoints.h b/kernel/sched/sched_tracepoints.h
index 5f804629d3b7..d1992f04ee27 100644
--- a/kernel/sched/sched_tracepoints.h
+++ b/kernel/sched/sched_tracepoints.h
@@ -47,4 +47,17 @@ static inline void sched_trace_pelt_dl_rq(struct rq *rq) {}
#endif /* CONFIG_SMP */
+static __always_inline void sched_trace_pelt_se(struct sched_entity *se)
+{
+ if (trace_pelt_se_enabled()) {
+ struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ int cpu = cpu_of(rq_of(cfs_rq));
+ char path[SCHED_TP_PATH_LEN];
+
+ cfs_rq_tg_path(gcfs_rq, path, SCHED_TP_PATH_LEN);
+ trace_pelt_se(cpu, path, se);
+ }
+}
+
#endif /* __SCHED_TRACEPOINTS_H */
--
2.17.1
Powered by blists - more mailing lists