[<prev] [next>] [day] [month] [year] [list]
Message-ID: <20220429085240.akxmdnc3a2s6rq7b@wubuntu>
Date: Fri, 29 Apr 2022 09:52:40 +0100
From: Qais Yousef <qais.yousef@....com>
To: "Peter Zijlstra (Intel)" <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>
Cc: Dietmar Eggemann <dietmar.eggemann@....com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Steven Rostedt <rostedt@...dmis.org>,
Phil Auld <pauld@...hat.com>,
Giovanni Gherdovich <ggherdovich@...e.cz>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/2] sched/fair: Remove sched_trace_*() helper functions
+CC lkml
On 04/28/22 15:43, Qais Yousef wrote:
> From: Dietmar Eggemann <dietmar.eggemann@....com>
>
> We no longer need them as we can use DWARF debug info or BTF + pahole to
> re-generate the required structs to compile against them for a given
> kernel.
>
> This moves the burden of maintaining these helper functions to the
> module.
>
> https://github.com/qais-yousef/sched_tp
>
> Note that at least pahole v1.15 is required when using DWARF. For BTF,
> at least v1.23, which is not yet released, will be required: earlier
> versions have an alignment problem that will lead to crashes when used
> with BTF.
>
> We should have enough infrastructure to make these helper functions now
> obsolete, so remove them.
>
> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@....com>
> [Rewrote commit message to reflect the new alternative]
> Signed-off-by: Qais Yousef <qais.yousef@....com>
> ---
> include/linux/sched.h | 14 -------
> kernel/sched/fair.c | 98 -------------------------------------------
> 2 files changed, 112 deletions(-)
>
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index 67f06f72c50e..fc74ea2578b7 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -2378,20 +2378,6 @@ static inline void rseq_syscall(struct pt_regs *regs)
>
> #endif
>
> -const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
> -char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
> -int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
> -
> -const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
> -const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
> -const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
> -
> -int sched_trace_rq_cpu(struct rq *rq);
> -int sched_trace_rq_cpu_capacity(struct rq *rq);
> -int sched_trace_rq_nr_running(struct rq *rq);
> -
> -const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
> -
> #ifdef CONFIG_SCHED_CORE
> extern void sched_core_free(struct task_struct *tsk);
> extern void sched_core_fork(struct task_struct *p);
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 4c420124b5d6..ff1177a4a286 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -11882,101 +11882,3 @@ __init void init_sched_fair_class(void)
> #endif /* SMP */
>
> }
> -
> -/*
> - * Helper functions to facilitate extracting info from tracepoints.
> - */
> -
> -const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq)
> -{
> -#ifdef CONFIG_SMP
> - return cfs_rq ? &cfs_rq->avg : NULL;
> -#else
> - return NULL;
> -#endif
> -}
> -EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_avg);
> -
> -char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len)
> -{
> - if (!cfs_rq) {
> - if (str)
> - strlcpy(str, "(null)", len);
> - else
> - return NULL;
> - }
> -
> - cfs_rq_tg_path(cfs_rq, str, len);
> - return str;
> -}
> -EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_path);
> -
> -int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq)
> -{
> - return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1;
> -}
> -EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_cpu);
> -
> -const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq)
> -{
> -#ifdef CONFIG_SMP
> - return rq ? &rq->avg_rt : NULL;
> -#else
> - return NULL;
> -#endif
> -}
> -EXPORT_SYMBOL_GPL(sched_trace_rq_avg_rt);
> -
> -const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq)
> -{
> -#ifdef CONFIG_SMP
> - return rq ? &rq->avg_dl : NULL;
> -#else
> - return NULL;
> -#endif
> -}
> -EXPORT_SYMBOL_GPL(sched_trace_rq_avg_dl);
> -
> -const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq)
> -{
> -#if defined(CONFIG_SMP) && defined(CONFIG_HAVE_SCHED_AVG_IRQ)
> - return rq ? &rq->avg_irq : NULL;
> -#else
> - return NULL;
> -#endif
> -}
> -EXPORT_SYMBOL_GPL(sched_trace_rq_avg_irq);
> -
> -int sched_trace_rq_cpu(struct rq *rq)
> -{
> - return rq ? cpu_of(rq) : -1;
> -}
> -EXPORT_SYMBOL_GPL(sched_trace_rq_cpu);
> -
> -int sched_trace_rq_cpu_capacity(struct rq *rq)
> -{
> - return rq ?
> -#ifdef CONFIG_SMP
> - rq->cpu_capacity
> -#else
> - SCHED_CAPACITY_SCALE
> -#endif
> - : -1;
> -}
> -EXPORT_SYMBOL_GPL(sched_trace_rq_cpu_capacity);
> -
> -const struct cpumask *sched_trace_rd_span(struct root_domain *rd)
> -{
> -#ifdef CONFIG_SMP
> - return rd ? rd->span : NULL;
> -#else
> - return NULL;
> -#endif
> -}
> -EXPORT_SYMBOL_GPL(sched_trace_rd_span);
> -
> -int sched_trace_rq_nr_running(struct rq *rq)
> -{
> - return rq ? rq->nr_running : -1;
> -}
> -EXPORT_SYMBOL_GPL(sched_trace_rq_nr_running);
> --
> 2.25.1
>
Powered by blists - more mailing lists