Message-ID: <CAKfTPtCgiEBhq49r=r88swExCJK80vYbbC+mr9KU+J-HsUsZOQ@mail.gmail.com>
Date: Tue, 2 Oct 2018 09:09:53 +0200
From: Vincent Guittot <vincent.guittot@...aro.org>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
linux-kernel <linux-kernel@...r.kernel.org>
Cc: Miguel Ojeda <miguel.ojeda.sandonis@...il.com>,
Borislav Petkov <bp@...en8.de>, dou_liyang@....com
Subject: Re: [PATCH] sched/pelt: fix warning and cleanup irq pelt config
On Tue, 25 Sep 2018 at 11:17, Vincent Guittot
<vincent.guittot@...aro.org> wrote:
>
> Create a config symbol for enabling irq load tracking in the scheduler.
> irq load tracking is useful only when irq or paravirtual time is
> accounted, but for now that is only possible with SMP.
>
> Also use __maybe_unused to remove the compilation warning in
> update_rq_clock_task() introduced by:
> commit 2e62c4743adc ("sched/fair: Remove #ifdefs from scale_rt_capacity()")
Gentle ping.
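
For anyone following along, here is a minimal standalone sketch of the
warning this patch silences (illustrative only, not the kernel code; the
CONFIG_* macros are plain -D switches here, whereas the kernel gets them
from Kconfig). With neither option defined, steal and irq_delta are
initialized but never read, and gcc -Wall warns unless the declaration
carries __maybe_unused:

/* maybe_unused_demo.c
 * Build: gcc -Wall maybe_unused_demo.c
 * Optionally add -DCONFIG_IRQ_TIME_ACCOUNTING and/or
 * -DCONFIG_PARAVIRT_TIME_ACCOUNTING. Without the attribute, any
 * variable left unread triggers -Wunused-variable.
 */
#include <stdio.h>

#define __maybe_unused __attribute__((__unused__))

static void update_clock_task(long long delta)
{
        /* Set here, but possibly never read below, exactly as in
         * update_rq_clock_task(). */
        long long __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
        irq_delta = 3;          /* stand-in for the irq_time_read() delta */
        delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
        steal = 2;              /* stand-in for the paravirt steal time */
        delta -= steal;
#endif
        printf("clock_task += %lld\n", delta);
}

int main(void)
{
        update_clock_task(10);
        return 0;
}
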
>
> Reported-by: Dou Liyang <douly.fnst@...fujitsu.com>
> Reported-by: Miguel Ojeda <miguel.ojeda.sandonis@...il.com>
> Suggested-by: Ingo Molnar <mingo@...hat.com>
> Fixes: 2e62c4743adc ("sched/fair: Remove #ifdefs from scale_rt_capacity()")
> Signed-off-by: Vincent Guittot <vincent.guittot@...aro.org>
> ---
> init/Kconfig | 5 +++++
> kernel/sched/core.c | 7 +++----
> kernel/sched/fair.c | 2 +-
> kernel/sched/pelt.c | 2 +-
> kernel/sched/pelt.h | 2 +-
> kernel/sched/sched.h | 5 ++---
> 6 files changed, 13 insertions(+), 10 deletions(-)
>
> diff --git a/init/Kconfig b/init/Kconfig
> index 1e234e2..317d5cc 100644
> --- a/init/Kconfig
> +++ b/init/Kconfig
> @@ -415,6 +415,11 @@ config IRQ_TIME_ACCOUNTING
>
> If in doubt, say N here.
>
> +config HAVE_SCHED_AVG_IRQ
> + def_bool y
> + depends on IRQ_TIME_ACCOUNTING || PARAVIRT_TIME_ACCOUNTING
> + depends on SMP
> +
> config BSD_PROCESS_ACCT
> bool "BSD Process Accounting"
> depends on MULTIUSER
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 625bc98..bf7b745 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -135,9 +135,8 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
> * In theory, the compiler should just see 0 here, and optimize out the call
> * to sched_rt_avg_update. But I don't trust it...
> */
> -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
> - s64 steal = 0, irq_delta = 0;
> -#endif
> + s64 __maybe_unused steal = 0, irq_delta = 0;
> +
> #ifdef CONFIG_IRQ_TIME_ACCOUNTING
> irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
>
> @@ -177,7 +176,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
>
> rq->clock_task += delta;
>
> -#ifdef HAVE_SCHED_AVG_IRQ
> +#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
> if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
> update_irq_load_avg(rq, irq_delta + steal);
> #endif
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 6bd142d..2c05aac 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -7249,7 +7249,7 @@ static inline bool others_have_blocked(struct rq *rq)
> if (READ_ONCE(rq->avg_dl.util_avg))
> return true;
>
> -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
> +#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
> if (READ_ONCE(rq->avg_irq.util_avg))
> return true;
> #endif
> diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
> index 35475c0..48a1264 100644
> --- a/kernel/sched/pelt.c
> +++ b/kernel/sched/pelt.c
> @@ -358,7 +358,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
> return 0;
> }
>
> -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
> +#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
> /*
> * irq:
> *
> diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
> index d2894db..7e56b48 100644
> --- a/kernel/sched/pelt.h
> +++ b/kernel/sched/pelt.h
> @@ -6,7 +6,7 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
> int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
> int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
>
> -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
> +#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
> int update_irq_load_avg(struct rq *rq, u64 running);
> #else
> static inline int
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 3a4ef8f..f3477e0 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -861,8 +861,7 @@ struct rq {
>
> struct sched_avg avg_rt;
> struct sched_avg avg_dl;
> -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
> -#define HAVE_SCHED_AVG_IRQ
> +#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
> struct sched_avg avg_irq;
> #endif
> u64 idle_stamp;
> @@ -2222,7 +2221,7 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
> }
> #endif
>
> -#ifdef HAVE_SCHED_AVG_IRQ
> +#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
> static inline unsigned long cpu_util_irq(struct rq *rq)
> {
> return rq->avg_irq.util_avg;
> --
> 2.7.4
>
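
As an aside on the cleanup half: with the symbol computed by Kconfig, each
compound "#if defined(CONFIG_IRQ_TIME_ACCOUNTING) ||
defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)" test collapses into a single
"#ifdef CONFIG_HAVE_SCHED_AVG_IRQ", and the pelt.h stub keeps callers free
of conditionals. A toy sketch of that stub pattern (demo-only names and
return values; the real declarations live in kernel/sched/pelt.h):

/* config_stub_demo.c
 * Build either way:
 *   gcc -Wall config_stub_demo.c
 *   gcc -Wall -DCONFIG_HAVE_SCHED_AVG_IRQ config_stub_demo.c
 * (in the kernel the symbol comes from Kconfig, not -D).
 */
#include <stdio.h>

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/* demo stand-in for the real implementation in kernel/sched/pelt.c */
static int update_irq_load_avg(int cpu, unsigned long long running)
{
        (void)cpu;
        return running != 0;    /* pretend the irq signal was updated */
}
#else
/* stub: with the config off, call sites compile unchanged and get 0 */
static inline int update_irq_load_avg(int cpu, unsigned long long running)
{
        (void)cpu;
        (void)running;
        return 0;
}
#endif

int main(void)
{
        /* The call site is identical in both configurations; no #ifdef. */
        printf("update_irq_load_avg() -> %d\n",
               update_irq_load_avg(0, 128ULL));
        return 0;
}
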