Date:   Tue, 9 May 2023 18:05:03 +0000
From:   Wyes Karny <wyes.karny@....com>
To:     <ray.huang@....com>, <rafael@...nel.org>,
        <viresh.kumar@...aro.org>, <srinivas.pandruvada@...ux.intel.com>,
        <lenb@...nel.org>
CC:     <mingo@...hat.com>, <peterz@...radead.org>,
        <juri.lelli@...hat.com>, <vincent.guittot@...aro.org>,
        <dietmar.eggemann@....com>, <rostedt@...dmis.org>,
        <bsegall@...gle.com>, <mgorman@...e.de>, <joel@...lfernandes.org>,
        <linux-pm@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
        "Gautham R. Shenoy" <gautham.shenoy@....com>,
        Wyes Karny <wyes.karny@....com>
Subject: [PATCH v2 2/2] amd_pstate: Add ->fast_switch() callback

From: "Gautham R. Shenoy" <gautham.shenoy@....com>

Schedutil normally invokes the ->adjust_perf() callback for drivers that
provide it and set the fast_switch_possible flag. However, when frequency
invariance is disabled, schedutil falls back to invoking ->fast_switch();
if that callback is NULL, it instead schedules a kthread to invoke the
->target() callback, which can slow down the frequency update.

Prevent this frequency-update slowdown by implementing the ->fast_switch()
callback for amd_pstate.
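
For context, the fallback order described above can be sketched as below.
This is a simplified userspace model of the behaviour the changelog
describes; the names (demo_driver, demo_freq_update, ...) are hypothetical
stand-ins, not the actual schedutil/cpufreq code:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the callbacks a cpufreq driver may provide;
 * not the real struct cpufreq_driver. */
struct demo_driver {
	bool fast_switch_possible;
	void (*adjust_perf)(unsigned int cpu, unsigned long target_perf);
	unsigned int (*fast_switch)(unsigned int target_freq);
	int (*target)(unsigned int target_freq);	/* deferred slow path */
};

/* Models the dispatch order described above: with frequency invariance,
 * ->adjust_perf() is used directly; without it, the frequency path needs
 * ->fast_switch(), otherwise the update is deferred to a kthread that
 * ends up calling ->target(). */
static void demo_freq_update(struct demo_driver *drv, bool freq_invariant,
			     unsigned int cpu, unsigned int target_freq)
{
	if (freq_invariant && drv->adjust_perf && drv->fast_switch_possible)
		drv->adjust_perf(cpu, target_freq);
	else if (drv->fast_switch_possible && drv->fast_switch)
		drv->fast_switch(target_freq);
	else
		drv->target(target_freq);	/* would run from a kthread */
}

static void demo_adjust_perf(unsigned int cpu, unsigned long perf)
{
	printf("adjust_perf: cpu%u -> %lu\n", cpu, perf);
}

static unsigned int demo_fast_switch(unsigned int freq)
{
	printf("fast_switch: %u kHz\n", freq);
	return freq;
}

static int demo_target(unsigned int freq)
{
	printf("target (deferred): %u kHz\n", freq);
	return 0;
}

int main(void)
{
	struct demo_driver drv = {
		.fast_switch_possible = true,
		.adjust_perf = demo_adjust_perf,
		.fast_switch = NULL,	/* amd_pstate before this patch */
		.target = demo_target,
	};

	/* Frequency invariance off: falls through to the deferred path. */
	demo_freq_update(&drv, false, 0, 2000000);

	/* With this patch, ->fast_switch() is available. */
	drv.fast_switch = demo_fast_switch;
	demo_freq_update(&drv, false, 0, 2000000);

	return 0;
}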

Signed-off-by: Wyes Karny <wyes.karny@....com>
Signed-off-by: Gautham R. Shenoy <gautham.shenoy@....com>
---
 drivers/cpufreq/amd-pstate.c | 40 ++++++++++++++++++++++++++++--------
 1 file changed, 32 insertions(+), 8 deletions(-)

diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 007bfe724a6a..d5758b67b6f3 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -444,9 +444,8 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
 	return 0;
 }
 
-static int amd_pstate_target(struct cpufreq_policy *policy,
-			     unsigned int target_freq,
-			     unsigned int relation)
+static int amd_pstate_update_freq(struct cpufreq_policy *policy,
+				  unsigned int target_freq, bool fast_switch)
 {
 	struct cpufreq_freqs freqs;
 	struct amd_cpudata *cpudata = policy->driver_data;
@@ -465,14 +464,37 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
 	des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
 				     cpudata->max_freq);
 
-	cpufreq_freq_transition_begin(policy, &freqs);
+	WARN_ON(fast_switch && !policy->fast_switch_enabled);
+	/*
+	 * If fast_switch is desired, then there aren't any registered
+	 * transition notifiers. See comment for
+	 * cpufreq_enable_fast_switch().
+	 */
+	if (!fast_switch)
+		cpufreq_freq_transition_begin(policy, &freqs);
+
 	amd_pstate_update(cpudata, min_perf, des_perf,
-			  max_perf, false, policy->governor->flags);
-	cpufreq_freq_transition_end(policy, &freqs, false);
+			max_perf, fast_switch, policy->governor->flags);
+
+	if (!fast_switch)
+		cpufreq_freq_transition_end(policy, &freqs, false);
 
 	return 0;
 }
 
+static int amd_pstate_target(struct cpufreq_policy *policy,
+			     unsigned int target_freq,
+			     unsigned int relation)
+{
+	return amd_pstate_update_freq(policy, target_freq, false);
+}
+
+static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
+				  unsigned int target_freq)
+{
+	return amd_pstate_update_freq(policy, target_freq, true);
+}
+
 static void amd_pstate_adjust_perf(unsigned int cpu,
 				   unsigned long _min_perf,
 				   unsigned long target_perf,
@@ -675,9 +697,10 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 	 * For shared memory system frequency update takes time that's why
 	 * do this in deferred kthread context.
 	 */
-	if (boot_cpu_has(X86_FEATURE_CPPC))
+	if (boot_cpu_has(X86_FEATURE_CPPC)) {
 		current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
-	else
+		policy->fast_switch_possible = true;
+	} else
 		current_pstate_driver->adjust_perf = NULL;
 
 	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
@@ -1313,6 +1336,7 @@ static struct cpufreq_driver amd_pstate_driver = {
 	.flags		= CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
 	.verify		= amd_pstate_verify,
 	.target		= amd_pstate_target,
+	.fast_switch    = amd_pstate_fast_switch,
 	.init		= amd_pstate_cpu_init,
 	.exit		= amd_pstate_cpu_exit,
 	.suspend	= amd_pstate_cpu_suspend,
-- 
2.34.1
