[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20190506090859.GK2606@hirez.programming.kicks-ass.net>
Date: Mon, 6 May 2019 11:08:59 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Qais Yousef <qais.yousef@....com>
Cc: Ingo Molnar <mingo@...hat.com>,
Steven Rostedt <rostedt@...dmis.org>,
linux-kernel@...r.kernel.org,
Pavankumar Kondeti <pkondeti@...eaurora.org>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Uwe Kleine-König <u.kleine-koenig@...gutronix.de>
Subject: Re: [PATCH 4/7] sched: Add sched_load_rq tracepoint
On Sun, May 05, 2019 at 12:57:29PM +0100, Qais Yousef wrote:
> +/*
> + * Following tracepoints are not exported in tracefs and provide hooking
> + * mechanisms only for testing and debugging purposes.
> + */
> +DECLARE_TRACE(sched_load_rq,
> + TP_PROTO(int cpu, const char *path, struct sched_avg *avg),
> + TP_ARGS(cpu, path, avg));
> +
> +DECLARE_TRACE(sched_load_se,
> + TP_PROTO(int cpu, const char *path, struct sched_entity *se),
> + TP_ARGS(cpu, path, se));
> +
> +DECLARE_TRACE(sched_overutilized,
> + TP_PROTO(int overutilized),
> + TP_ARGS(overutilized));
This doesn't generate any actual userspace interface because of the lack
of DEFINE_EVENT()?
> diff --git a/kernel/sched/sched_tracepoints.h b/kernel/sched/sched_tracepoints.h
> new file mode 100644
> index 000000000000..f4ded705118e
> --- /dev/null
> +++ b/kernel/sched/sched_tracepoints.h
> @@ -0,0 +1,39 @@
Like with the other newly introduced header files, this one is lacking
the normal include guard.
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Scheduler tracepoints that are probe-able only and aren't exported ABI in
> + * tracefs.
> + */
> +
> +#include <trace/events/sched.h>
> +
> +#define SCHED_TP_PATH_LEN 64
> +
> +
> +static __always_inline void sched_tp_load_cfs_rq(struct cfs_rq *cfs_rq)
> +{
> + if (trace_sched_load_rq_enabled()) {
> + int cpu = cpu_of(rq_of(cfs_rq));
> + char path[SCHED_TP_PATH_LEN];
> +
> + cfs_rq_tg_path(cfs_rq, path, SCHED_TP_PATH_LEN);
> + trace_sched_load_rq(cpu, path, &cfs_rq->avg);
> + }
> +}
> +
> +static __always_inline void sched_tp_load_rt_rq(struct rq *rq)
> +{
> + if (trace_sched_load_rq_enabled()) {
> + int cpu = cpu_of(rq);
> +
> + trace_sched_load_rq(cpu, NULL, &rq->avg_rt);
> + }
> +}
> +
> +static __always_inline void sched_tp_load_dl_rq(struct rq *rq)
> +{
> + if (trace_sched_load_rq_enabled()) {
> + int cpu = cpu_of(rq);
> +
> + trace_sched_load_rq(cpu, NULL, &rq->avg_dl);
> + }
> +}
> +static __always_inline void sched_tp_load_se(struct sched_entity *se)
> +{
> + if (trace_sched_load_se_enabled()) {
> + struct cfs_rq *gcfs_rq = group_cfs_rq(se);
> + struct cfs_rq *cfs_rq = cfs_rq_of(se);
> + char path[SCHED_TP_PATH_LEN];
> + int cpu = cpu_of(rq_of(cfs_rq));
> +
> + cfs_rq_tg_path(gcfs_rq, path, SCHED_TP_PATH_LEN);
> + trace_sched_load_se(cpu, path, se);
> + }
> +}
These functions really should be called trace_*()
Also, I _really_ hate how fat they are. Why can't we do simple,
straightforward things like:
trace_pelt_cfq(cfq);
trace_pelt_rq(rq);
trace_pelt_se(se);
And then have the thing attached to the event do the fat bits like
extract the path and whatnot.
Powered by blists - more mailing lists