Message-ID: <CAEi0qNmWDQpbj6NjS+fbX1krc9-QEa7LujWydjy8eaeczJxSCg@mail.gmail.com>
Date: Wed, 26 Jul 2017 22:49:34 -0700
From: "Joel Fernandes (Google)" <joel.opensrc@...il.com>
To: Viresh Kumar <viresh.kumar@...aro.org>
Cc: Rafael Wysocki <rjw@...ysocki.net>, Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
linux-pm@...r.kernel.org,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
smuckle.linux@...il.com, eas-dev@...ts.linaro.org,
Joel Fernandes <joelaf@...gle.com>
Subject: Re: [Eas-dev] [PATCH V4 2/3] cpufreq: schedutil: Process remote
callback for shared policies
On Wed, Jul 26, 2017 at 2:22 AM, Viresh Kumar <viresh.kumar@...aro.org> wrote:
> This patch updates the schedutil governor to process cpufreq utilization
> update hooks called for remote CPUs where the remote CPU is managed by
> the cpufreq policy of the local CPU.
>
> Based on initial work from Steve Muckle.
>
> Signed-off-by: Viresh Kumar <viresh.kumar@...aro.org>
Reviewed-by: Joel Fernandes <joelaf@...gle.com>
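
For anyone following along, here is a minimal userspace sketch of the gating
check this patch adds: a remote utilization-update callback is processed only
when the CPU running the callback belongs to the policy's CPU mask. The names
and the plain bitmask below are illustrative stand-ins for struct cpumask and
cpumask_test_cpu(); this is not kernel code.

    /*
     * Userspace sketch of the remote-callback gate added to
     * sugov_should_update_freq().  A plain bitmask models the
     * policy's "cpus" mask; names are hypothetical.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct policy_sketch {
            unsigned long cpus;     /* bit i set => CPU i shares this policy */
    };

    static bool cpu_in_policy(const struct policy_sketch *p, int cpu)
    {
            return (p->cpus >> cpu) & 1UL;
    }

    /* Allow remote callbacks only on CPUs sharing the cpufreq policy. */
    static bool should_update_freq(const struct policy_sketch *p, int this_cpu)
    {
            return cpu_in_policy(p, this_cpu);
    }

    int main(void)
    {
            struct policy_sketch policy = { .cpus = 0x0FUL };   /* CPUs 0-3 */

            printf("CPU 2 -> %d\n", should_update_freq(&policy, 2));  /* 1: processed */
            printf("CPU 5 -> %d\n", should_update_freq(&policy, 5));  /* 0: rejected  */
            return 0;
    }
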
thanks,
-Joel
> ---
> kernel/sched/cpufreq_schedutil.c | 21 ++++++++++-----------
> 1 file changed, 10 insertions(+), 11 deletions(-)
>
> diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
> index bb834747e49b..c3baf70d360c 100644
> --- a/kernel/sched/cpufreq_schedutil.c
> +++ b/kernel/sched/cpufreq_schedutil.c
> @@ -72,13 +72,12 @@ static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
>
> /************************ Governor internals ***********************/
>
> -static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time,
> - int target_cpu)
> +static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
> {
> s64 delta_ns;
>
> - /* Don't allow remote callbacks */
> - if (smp_processor_id() != target_cpu)
> + /* Allow remote callbacks only on the CPUs sharing cpufreq policy */
> + if (!cpumask_test_cpu(smp_processor_id(), sg_policy->policy->cpus))
> return false;
>
> if (sg_policy->work_in_progress)
> @@ -159,12 +158,12 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
> return cpufreq_driver_resolve_freq(policy, freq);
> }
>
> -static void sugov_get_util(unsigned long *util, unsigned long *max)
> +static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
> {
> - struct rq *rq = this_rq();
> + struct rq *rq = cpu_rq(cpu);
> unsigned long cfs_max;
>
> - cfs_max = arch_scale_cpu_capacity(NULL, smp_processor_id());
> + cfs_max = arch_scale_cpu_capacity(NULL, cpu);
>
> *util = min(rq->cfs.avg.util_avg, cfs_max);
> *max = cfs_max;
> @@ -226,7 +225,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
> sugov_set_iowait_boost(sg_cpu, time, flags);
> sg_cpu->last_update = time;
>
> - if (!sugov_should_update_freq(sg_policy, time, hook->cpu))
> + if (!sugov_should_update_freq(sg_policy, time))
> return;
>
> busy = sugov_cpu_is_busy(sg_cpu);
> @@ -234,7 +233,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
> if (flags & SCHED_CPUFREQ_RT_DL) {
> next_f = policy->cpuinfo.max_freq;
> } else {
> - sugov_get_util(&util, &max);
> + sugov_get_util(&util, &max, hook->cpu);
> sugov_iowait_boost(sg_cpu, &util, &max);
> next_f = get_next_freq(sg_policy, util, max);
> /*
> @@ -295,7 +294,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
> unsigned long util, max;
> unsigned int next_f;
>
> - sugov_get_util(&util, &max);
> + sugov_get_util(&util, &max, hook->cpu);
>
> raw_spin_lock(&sg_policy->update_lock);
>
> @@ -306,7 +305,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
> sugov_set_iowait_boost(sg_cpu, time, flags);
> sg_cpu->last_update = time;
>
> - if (sugov_should_update_freq(sg_policy, time, hook->cpu)) {
> + if (sugov_should_update_freq(sg_policy, time)) {
> if (flags & SCHED_CPUFREQ_RT_DL)
> next_f = sg_policy->policy->cpuinfo.max_freq;