[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20190304143928.GA13701@chenyu-office.sh.intel.com>
Date: Mon, 4 Mar 2019 22:39:28 +0800
From: Yu Chen <yu.c.chen@...el.com>
To: "Rafael J. Wysocki" <rjw@...ysocki.net>
Cc: Linux PM <linux-pm@...r.kernel.org>,
LKML <linux-kernel@...r.kernel.org>,
Viresh Kumar <viresh.kumar@...aro.org>,
Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>,
Gabriele Mazzotta <gabriele.mzt@...il.com>
Subject: Re: [RFT][Update][PATCH 2/2] cpufreq: intel_pstate: Update max CPU
frequency on global turbo changes
On Fri, Mar 01, 2019 at 01:57:06PM +0100, Rafael J. Wysocki wrote:
> From: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
>
> While the cpuinfo.max_freq value doesn't really matter for
> intel_pstate in the active mode, in the passive mode it is used by
> governors as the maximum physical frequency of the CPU and the
> results of governor computations generally depend on it. Also it
> is made available to user space via sysfs and it should match the
> current HW configuration.
>
> For this reason, make intel_pstate update cpuinfo.max_freq for all
> CPUs if it detects a global change of turbo frequency settings from
> "disable" to "enable" or the other way associated with a _PPC change
> notification from the platform firmware.
>
> Note that policy_is_inactive() and cpufreq_set_policy() need to be
> made available to it for this purpose.
>
> Link: https://bugzilla.kernel.org/show_bug.cgi?id=200759
> Reported-by: Gabriele Mazzotta <gabriele.mzt@...il.com>
> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
> ---
>
> Update, because the patch sent previously doesn't build, due to an extra
> arg declared for intel_pstate_update_max_freq().
>
> ---
> drivers/cpufreq/cpufreq.c | 12 ++----------
> drivers/cpufreq/intel_pstate.c | 33 ++++++++++++++++++++++++++++++++-
> include/linux/cpufreq.h | 7 +++++++
> 3 files changed, 41 insertions(+), 11 deletions(-)
>
> Index: linux-pm/drivers/cpufreq/intel_pstate.c
> ===================================================================
> --- linux-pm.orig/drivers/cpufreq/intel_pstate.c
> +++ linux-pm/drivers/cpufreq/intel_pstate.c
> @@ -897,6 +897,36 @@ static void intel_pstate_update_policies
> cpufreq_update_policy(cpu);
> }
>
> +static void intel_pstate_update_max_freq(unsigned int cpu)
> +{
> + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
> + struct cpufreq_policy new_policy;
> + struct cpudata *cpudata;
> +
> + if (!policy)
> + return;
> +
> + down_write(&policy->rwsem);
> +
> + if (policy_is_inactive(policy))
> + goto unlock;
> +
> + cpudata = all_cpu_data[cpu];
> + policy->cpuinfo.max_freq = global.turbo_disabled_upd ?
> + cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
> +
> + memcpy(&new_policy, policy, sizeof(*policy));
> + new_policy.max = min(policy->user_policy.max, policy->cpuinfo.max_freq);
> + new_policy.min = min(policy->user_policy.min, new_policy.max);
> +
> + cpufreq_set_policy(policy, &new_policy);
> +
> +unlock:
> + up_write(&policy->rwsem);
> +
> + cpufreq_cpu_put(policy);
> +}
> +
I tried to test this on a MacBook I have on hand; however, I did not
observe the _PPC notification on this machine, so the test may not
cover the code path added by this patch. I checked cpufreq with this
patch applied under different loads, and the cpufreq scales well.
> static void intel_pstate_update_limits(unsigned int cpu)
> {
> mutex_lock(&intel_pstate_driver_lock);
> @@ -908,7 +938,8 @@ static void intel_pstate_update_limits(u
> */
> if (global.turbo_disabled_upd != global.turbo_disabled) {
> global.turbo_disabled_upd = global.turbo_disabled;
> - intel_pstate_update_policies();
> + for_each_possible_cpu(cpu)
> + intel_pstate_update_max_freq(cpu);
> } else {
> cpufreq_update_policy(cpu);
> }
> Index: linux-pm/drivers/cpufreq/cpufreq.c
> ===================================================================
> --- linux-pm.orig/drivers/cpufreq/cpufreq.c
> +++ linux-pm/drivers/cpufreq/cpufreq.c
> @@ -34,11 +34,6 @@
>
> static LIST_HEAD(cpufreq_policy_list);
>
> -static inline bool policy_is_inactive(struct cpufreq_policy *policy)
> -{
> - return cpumask_empty(policy->cpus);
> -}
> -
> /* Macros to iterate over CPU policies */
> #define for_each_suitable_policy(__policy, __active) \
> list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
> @@ -675,9 +670,6 @@ static ssize_t show_scaling_cur_freq(str
> return ret;
> }
>
> -static int cpufreq_set_policy(struct cpufreq_policy *policy,
> - struct cpufreq_policy *new_policy);
> -
> /**
> * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
> */
> @@ -2235,8 +2227,8 @@ EXPORT_SYMBOL(cpufreq_get_policy);
> *
> * The cpuinfo part of @policy is not updated by this function.
> */
At first there seemed to be a patching error when applying this on
top of upstream 5.0, but then I realized that this patch is based on
intel-next.
Thanks,
Ryan
> -static int cpufreq_set_policy(struct cpufreq_policy *policy,
> - struct cpufreq_policy *new_policy)
> +int cpufreq_set_policy(struct cpufreq_policy *policy,
> + struct cpufreq_policy *new_policy)
> {
> struct cpufreq_governor *old_gov;
> int ret;
> Index: linux-pm/include/linux/cpufreq.h
> ===================================================================
> --- linux-pm.orig/include/linux/cpufreq.h
> +++ linux-pm/include/linux/cpufreq.h
> @@ -178,6 +178,11 @@ static inline struct cpufreq_policy *cpu
> static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
> #endif
>
> +static inline bool policy_is_inactive(struct cpufreq_policy *policy)
> +{
> + return cpumask_empty(policy->cpus);
> +}
> +
> static inline bool policy_is_shared(struct cpufreq_policy *policy)
> {
> return cpumask_weight(policy->cpus) > 1;
> @@ -194,6 +199,8 @@ void disable_cpufreq(void);
>
> u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
> int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
> +int cpufreq_set_policy(struct cpufreq_policy *policy,
> + struct cpufreq_policy *new_policy);
> void cpufreq_update_policy(unsigned int cpu);
> void cpufreq_update_limits(unsigned int cpu);
> bool have_governor_per_policy(void);
>
Powered by blists - more mailing lists