Message-ID: <2345253.LYi3vV7ftd@kreacher>
Date: Mon, 09 Nov 2020 17:55:42 +0100
From: "Rafael J. Wysocki" <rjw@...ysocki.net>
To: Linux PM <linux-pm@...r.kernel.org>
Cc: "Rafael J. Wysocki" <rafael@...nel.org>,
Viresh Kumar <viresh.kumar@...aro.org>,
Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>,
Zhang Rui <rui.zhang@...el.com>,
LKML <linux-kernel@...r.kernel.org>,
Doug Smythies <dsmythies@...us.net>
Subject: [PATCH v2 4/4] cpufreq: intel_pstate: Take CPUFREQ_GOV_FLAG_STRICT_TARGET into account

From: Rafael J. Wysocki <rafael.j.wysocki@...el.com>

Make intel_pstate take the new CPUFREQ_GOV_FLAG_STRICT_TARGET
governor flag into account when it operates in the passive mode with
HWP enabled, so as to fix the "powersave" governor behavior in that
case (currently, HWP is allowed to scale the performance all the way
up to the policy max limit when the "powersave" governor is used,
whereas it should be constrained to the policy min limit instead).

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
---
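A note for reviewers (not part of the patch): the policy->strict_target
flag consumed below is introduced by the earlier patches in this series.
As a rough, self-contained illustration of what the driver relies on,
the core is expected to latch the governor flag into the policy roughly
as sketched here (user-space stand-ins for the kernel types; the flag's
bit value is assumed, and the real core-side code may differ):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures involved. */
#define CPUFREQ_GOV_FLAG_STRICT_TARGET	(1U << 0)	/* bit position assumed */

struct cpufreq_governor { unsigned int flags; };
struct cpufreq_policy {
	struct cpufreq_governor *governor;
	bool strict_target;
};

/* The core caches the governor property in the policy so that drivers
 * can check it on every frequency transition without dereferencing the
 * governor each time. */
static void cache_strict_target(struct cpufreq_policy *policy)
{
	policy->strict_target = !!(policy->governor->flags &
				   CPUFREQ_GOV_FLAG_STRICT_TARGET);
}

int main(void)
{
	struct cpufreq_governor powersave = {
		.flags = CPUFREQ_GOV_FLAG_STRICT_TARGET,
	};
	struct cpufreq_policy policy = { .governor = &powersave };

	cache_strict_target(&policy);
	printf("strict_target: %d\n", policy.strict_target);	/* prints 1 */
	return 0;
}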
 drivers/cpufreq/intel_pstate.c |   16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

Index: linux-pm/drivers/cpufreq/intel_pstate.c
===================================================================
--- linux-pm.orig/drivers/cpufreq/intel_pstate.c
+++ linux-pm/drivers/cpufreq/intel_pstate.c
@@ -2527,7 +2527,7 @@ static void intel_cpufreq_trace(struct c
 }
 
 static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
-				     bool fast_switch)
+				     bool strict, bool fast_switch)
 {
 	u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
 
@@ -2539,7 +2539,7 @@ static void intel_cpufreq_adjust_hwp(str
 	 * field in it, so opportunistically update the max too if needed.
 	 */
 	value &= ~HWP_MAX_PERF(~0L);
-	value |= HWP_MAX_PERF(cpu->max_perf_ratio);
+	value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
 
 	if (value == prev)
 		return;
@@ -2562,14 +2562,16 @@ static void intel_cpufreq_adjust_perf_ct
 			    pstate_funcs.get_val(cpu, target_pstate));
 }
 
-static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate,
-				       bool fast_switch)
+static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
+				       int target_pstate, bool fast_switch)
 {
+	struct cpudata *cpu = all_cpu_data[policy->cpu];
 	int old_pstate = cpu->pstate.current_pstate;
 
 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
 	if (hwp_active) {
-		intel_cpufreq_adjust_hwp(cpu, target_pstate, fast_switch);
+		intel_cpufreq_adjust_hwp(cpu, target_pstate,
+					 policy->strict_target, fast_switch);
 		cpu->pstate.current_pstate = target_pstate;
 	} else if (target_pstate != old_pstate) {
 		intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
@@ -2609,7 +2611,7 @@ static int intel_cpufreq_target(struct c
 		break;
 	}
 
-	target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false);
+	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
 
 	freqs.new = target_pstate * cpu->pstate.scaling;
 
@@ -2628,7 +2630,7 @@ static unsigned int intel_cpufreq_fast_s
 
 	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
 
-	target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true);
+	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
 
 	return target_pstate * cpu->pstate.scaling;
 }
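For completeness, the effect on the HWP Request value can be illustrated
with a small stand-alone program. The HWP_MIN_PERF()/HWP_MAX_PERF() field
encodings mirror arch/x86/include/asm/msr-index.h; the function below is
a simplified sketch of what intel_cpufreq_adjust_hwp() computes with this
patch applied (the min-side update is pre-existing context; only the max
side changes here):

#include <stdint.h>
#include <stdio.h>

/* Field encodings as in arch/x86/include/asm/msr-index.h. */
#define HWP_MIN_PERF(x)		((x) & 0xff)
#define HWP_MAX_PERF(x)		(((x) & 0xff) << 8)

/* Simplified model of the min/max field update done by
 * intel_cpufreq_adjust_hwp() after this patch. */
static uint64_t adjust_hwp_req(uint64_t value, unsigned int target_pstate,
			       unsigned int max_perf_ratio, int strict)
{
	/* Min is always set to the target (pre-existing behavior). */
	value &= ~(uint64_t)HWP_MIN_PERF(~0L);
	value |= HWP_MIN_PERF(target_pstate);

	/* Max is clamped to the target only for strict-target governors. */
	value &= ~(uint64_t)HWP_MAX_PERF(~0L);
	value |= HWP_MAX_PERF(strict ? target_pstate : max_perf_ratio);

	return value;
}

int main(void)
{
	/* Example: target P-state 10, policy max ratio 40. */
	printf("non-strict: %#llx\n",	/* 0x280a: min 10, max 40 */
	       (unsigned long long)adjust_hwp_req(0, 10, 40, 0));
	printf("strict:     %#llx\n",	/* 0xa0a: min == max == 10 */
	       (unsigned long long)adjust_hwp_req(0, 10, 40, 1));
	return 0;
}

With strict set, the min and max fields both end up equal to the target,
so HWP is pinned to the governor's target instead of being allowed to
scale performance up to the policy max limit.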