[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAJZ5v0gg7dz44s_fjtJKm8Sv87RZypxYe3EwXX74oAuk31GW1Q@mail.gmail.com>
Date: Fri, 30 Oct 2020 16:26:32 +0100
From: "Rafael J. Wysocki" <rafael@...nel.org>
To: Ionela Voinescu <ionela.voinescu@....com>
Cc: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Vincent Guittot <vincent.guittot@...aro.org>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
"Rafael J. Wysocki" <rjw@...ysocki.net>,
Viresh Kumar <viresh.kumar@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Quentin Perret <qperret@...gle.com>,
Valentin Schneider <valentin.schneider@....com>,
Linux PM <linux-pm@...r.kernel.org>,
Linux ARM <linux-arm-kernel@...ts.infradead.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH RESEND v2 1/3] sched/topology,schedutil: wrap sched
domains rebuild
On Tue, Oct 27, 2020 at 7:08 PM Ionela Voinescu <ionela.voinescu@....com> wrote:
>
> Add the rebuild_sched_domains_energy() function to wrap the functionality
> that rebuilds the scheduling domains if any of the Energy Aware Scheduling
> (EAS) initialisation conditions change. This functionality is used when
> schedutil is added or removed or when EAS is enabled or disabled
> through the sched_energy_aware sysctl.
>
> Therefore, create a single function that is used in both these cases and
> that can be later reused.
>
> Signed-off-by: Ionela Voinescu <ionela.voinescu@....com>
> Acked-by: Quentin Perret <qperret@...gle.com>
> Cc: Ingo Molnar <mingo@...hat.com>
> Cc: Peter Zijlstra <peterz@...radead.org>
> Cc: Rafael J. Wysocki <rjw@...ysocki.net>
> Cc: Viresh Kumar <viresh.kumar@...aro.org>
For the schedutil part:
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
and I'm assuming the patch will be taken care of by Peter.
> ---
> include/linux/sched/topology.h | 8 ++++++++
> kernel/sched/cpufreq_schedutil.c | 9 +--------
> kernel/sched/topology.c | 18 +++++++++++-------
> 3 files changed, 20 insertions(+), 15 deletions(-)
>
> diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
> index 9ef7bf686a9f..8f0f778b7c91 100644
> --- a/include/linux/sched/topology.h
> +++ b/include/linux/sched/topology.h
> @@ -225,6 +225,14 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
>
> #endif /* !CONFIG_SMP */
>
> +#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
> +extern void rebuild_sched_domains_energy(void);
> +#else
> +static inline void rebuild_sched_domains_energy(void)
> +{
> +}
> +#endif
> +
> #ifndef arch_scale_cpu_capacity
> /**
> * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
> diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
> index e254745a82cb..37b303890336 100644
> --- a/kernel/sched/cpufreq_schedutil.c
> +++ b/kernel/sched/cpufreq_schedutil.c
> @@ -899,16 +899,9 @@ struct cpufreq_governor *cpufreq_default_governor(void)
> cpufreq_governor_init(schedutil_gov);
>
> #ifdef CONFIG_ENERGY_MODEL
> -extern bool sched_energy_update;
> -extern struct mutex sched_energy_mutex;
> -
> static void rebuild_sd_workfn(struct work_struct *work)
> {
> - mutex_lock(&sched_energy_mutex);
> - sched_energy_update = true;
> - rebuild_sched_domains();
> - sched_energy_update = false;
> - mutex_unlock(&sched_energy_mutex);
> + rebuild_sched_domains_energy();
> }
> static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
>
> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> index dd7770226086..270bafb73506 100644
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -211,6 +211,15 @@ unsigned int sysctl_sched_energy_aware = 1;
> DEFINE_MUTEX(sched_energy_mutex);
> bool sched_energy_update;
>
> +void rebuild_sched_domains_energy(void)
> +{
> + mutex_lock(&sched_energy_mutex);
> + sched_energy_update = true;
> + rebuild_sched_domains();
> + sched_energy_update = false;
> + mutex_unlock(&sched_energy_mutex);
> +}
> +
> #ifdef CONFIG_PROC_SYSCTL
> int sched_energy_aware_handler(struct ctl_table *table, int write,
> void *buffer, size_t *lenp, loff_t *ppos)
> @@ -223,13 +232,8 @@ int sched_energy_aware_handler(struct ctl_table *table, int write,
> ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
> if (!ret && write) {
> state = static_branch_unlikely(&sched_energy_present);
> - if (state != sysctl_sched_energy_aware) {
> - mutex_lock(&sched_energy_mutex);
> - sched_energy_update = 1;
> - rebuild_sched_domains();
> - sched_energy_update = 0;
> - mutex_unlock(&sched_energy_mutex);
> - }
> + if (state != sysctl_sched_energy_aware)
> + rebuild_sched_domains_energy();
> }
>
> return ret;
> --
> 2.17.1
>
Powered by blists - more mailing lists