[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAJZ5v0g45=0+uLqPD5jib8aQrw8syjMxzd9uPqnTUzxgVCDVkQ@mail.gmail.com>
Date: Tue, 20 Jun 2023 19:40:28 +0200
From: "Rafael J. Wysocki" <rafael@...nel.org>
To: Lukasz Luba <lukasz.luba@....com>
Cc: linux-kernel@...r.kernel.org, linux-trace-kernel@...r.kernel.org,
rafael@...nel.org, linux-pm@...r.kernel.org, rostedt@...dmis.org,
mhiramat@...nel.org, mingo@...hat.com, peterz@...radead.org,
juri.lelli@...hat.com, vincent.guittot@...aro.org,
dietmar.eggemann@....com, bsegall@...gle.com, mgorman@...e.de,
bristot@...hat.com, vschneid@...hat.com, delyank@...com,
qyousef@...gle.com, qyousef@...alina.io,
kernel test robot <lkp@...el.com>
Subject: Re: [RESEND][PATCH v2 3/3] schedutil: trace: Add tracing to capture
filter out requests
On Mon, May 22, 2023 at 4:57 PM Lukasz Luba <lukasz.luba@....com> wrote:
>
> Some of the frequency update requests coming from the task scheduler
> might be filtered out. It can happen when the previous request was served
> not that long ago (in a period smaller than provided by the cpufreq driver
> as minimum for frequency update). In such a case, we want to know if some of
> the frequency updates cannot make it through.
> Export the new tracepoint as well. That would allow it to be handled by a
> toolkit for trace analysis.
>
> Reported-by: kernel test robot <lkp@...el.com> # solved tricky build
> Signed-off-by: Lukasz Luba <lukasz.luba@....com>
> ---
> include/trace/events/sched.h | 4 ++++
> kernel/sched/cpufreq_schedutil.c | 10 ++++++++--
> 2 files changed, 12 insertions(+), 2 deletions(-)
>
> diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
> index dbfb30809f15..e34b7cd5de73 100644
> --- a/include/trace/events/sched.h
> +++ b/include/trace/events/sched.h
> @@ -739,6 +739,10 @@ DECLARE_TRACE(uclamp_update_tsk_tp,
> TP_PROTO(struct task_struct *tsk, int uclamp_id, unsigned int value),
> TP_ARGS(tsk, uclamp_id, value));
>
> +DECLARE_TRACE(schedutil_update_filtered_tp,
> + TP_PROTO(int cpu),
> + TP_ARGS(cpu));
> +
> #endif /* _TRACE_SCHED_H */
>
> /* This part must be outside protection */
> diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
> index f462496e5c07..4f9daf258a65 100644
> --- a/kernel/sched/cpufreq_schedutil.c
> +++ b/kernel/sched/cpufreq_schedutil.c
> @@ -6,6 +6,8 @@
> * Author: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
> */
>
> +EXPORT_TRACEPOINT_SYMBOL_GPL(schedutil_update_filtered_tp);
> +
> #define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)
>
> struct sugov_tunables {
> @@ -318,8 +320,10 @@ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
>
> ignore_dl_rate_limit(sg_cpu);
>
> - if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
> + if (!sugov_should_update_freq(sg_cpu->sg_policy, time)) {
> + trace_schedutil_update_filtered_tp(sg_cpu->cpu);
It looks like the tracepoint can be added to
sugov_should_update_freq() for less code duplication.
> return false;
> + }
>
> sugov_get_util(sg_cpu);
> sugov_iowait_apply(sg_cpu, time, max_cap);
> @@ -446,8 +450,10 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
>
> ignore_dl_rate_limit(sg_cpu);
>
> - if (!sugov_should_update_freq(sg_policy, time))
> + if (!sugov_should_update_freq(sg_policy, time)) {
> + trace_schedutil_update_filtered_tp(sg_cpu->cpu);
> goto unlock;
> + }
>
> next_f = sugov_next_freq_shared(sg_cpu, time);
>
> --
Powered by blists - more mailing lists