Message-ID: <6039220.MhkbZ0Pkbq@rjwysocki.net>
Date: Wed, 16 Apr 2025 19:59:12 +0200
From: "Rafael J. Wysocki" <rjw@...ysocki.net>
To: Linux PM <linux-pm@...r.kernel.org>
Cc: LKML <linux-kernel@...r.kernel.org>, Lukasz Luba <lukasz.luba@....com>,
Peter Zijlstra <peterz@...radead.org>,
Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Morten Rasmussen <morten.rasmussen@....com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Ricardo Neri <ricardo.neri-calderon@...ux.intel.com>,
Pierre Gondois <pierre.gondois@....com>,
Christian Loehle <christian.loehle@....com>
Subject: [RFT][PATCH v1 2/8] cpufreq/sched: Move cpufreq-specific EAS checks to cpufreq

From: Rafael J. Wysocki <rafael.j.wysocki@...el.com>

Doing cpufreq-specific EAS checks that require accessing policy
internals directly from sched_is_eas_possible() is a bit unfortunate,
so introduce cpufreq_ready_for_eas() in cpufreq, move those checks
into that new function, and make sched_is_eas_possible() call it.

While at it, address a possible race between the EAS governor check
and a governor change by doing the former under the policy rwsem.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
---
v0.3 -> v1:
* Add a new helper called cpufreq_policy_is_good_for_eas() which is
  properly synchronized with governor changes.
* Slightly modify debug messages.

This patch is regarded as a cleanup for 6.16.
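
For readers unfamiliar with the scope-based cleanup helpers, the new
cpufreq_policy_is_good_for_eas() below is roughly equivalent to the
following open-coded sequence. This is a minimal sketch only, assuming
that guard(cpufreq_policy_read) takes policy->rwsem for reading and
that __free(put_cpufreq_policy) drops the reference obtained from
cpufreq_cpu_get(); example_check() is a hypothetical name, not part of
the patch:

/* Sketch (not part of the patch): __free()/guard() expanded by hand. */
static bool example_check(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	bool ret;

	if (!policy)
		return false;

	/* Hold the policy rwsem so the governor cannot change mid-check. */
	down_read(&policy->rwsem);
	ret = sugov_is_governor(policy);
	up_read(&policy->rwsem);

	cpufreq_cpu_put(policy);	/* reference from cpufreq_cpu_get() */
	return ret;
}

Holding the rwsem across the check is what closes the race with
governor changes mentioned above; the old topology.c code did the same
check with only the policy reference held.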
---
 drivers/cpufreq/cpufreq.c |   32 ++++++++++++++++++++++++++++++++
 include/linux/cpufreq.h   |    2 ++
 kernel/sched/topology.c   |   25 +++++--------------------
 3 files changed, 39 insertions(+), 20 deletions(-)
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -3041,6 +3041,38 @@
 	return 0;
 }
+
+static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
+{
+	struct cpufreq_policy *policy __free(put_cpufreq_policy);
+
+	policy = cpufreq_cpu_get(cpu);
+	if (!policy) {
+		pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
+		return false;
+	}
+
+	guard(cpufreq_policy_read)(policy);
+
+	return sugov_is_governor(policy);
+}
+
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask)
+{
+	unsigned int cpu;
+
+	/* Do not attempt EAS if schedutil is not being used. */
+	for_each_cpu(cpu, cpu_mask) {
+		if (!cpufreq_policy_is_good_for_eas(cpu)) {
+			pr_debug("rd %*pbl: schedutil is mandatory for EAS\n",
+				 cpumask_pr_args(cpu_mask));
+			return false;
+		}
+	}
+
+	return true;
+}
+
 module_param(off, int, 0444);
 module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
 core_initcall(cpufreq_core_init);
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1212,6 +1212,8 @@
 		struct cpufreq_frequency_table *table,
 		unsigned int transition_latency);
 
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask);
+
 static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
 {
 	dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -212,8 +212,6 @@
 static bool sched_is_eas_possible(const struct cpumask *cpu_mask)
 {
 	bool any_asym_capacity = false;
-	struct cpufreq_policy *policy;
-	bool policy_is_ready;
 	int i;
 
 	/* EAS is enabled for asymmetric CPU capacity topologies. */
@@ -248,25 +246,12 @@
 		return false;
 	}
 
-	/* Do not attempt EAS if schedutil is not being used. */
-	for_each_cpu(i, cpu_mask) {
-		policy = cpufreq_cpu_get(i);
-		if (!policy) {
-			if (sched_debug()) {
-				pr_info("rd %*pbl: Checking EAS, cpufreq policy not set for CPU: %d",
-					cpumask_pr_args(cpu_mask), i);
-			}
-			return false;
-		}
-		policy_is_ready = sugov_is_governor(policy);
-		cpufreq_cpu_put(policy);
-		if (!policy_is_ready) {
-			if (sched_debug()) {
-				pr_info("rd %*pbl: Checking EAS, schedutil is mandatory\n",
-					cpumask_pr_args(cpu_mask));
-			}
-			return false;
+	if (!cpufreq_ready_for_eas(cpu_mask)) {
+		if (sched_debug()) {
+			pr_info("rd %*pbl: Checking EAS: cpufreq is not ready\n",
+				cpumask_pr_args(cpu_mask));
 		}
+		return false;
 	}
 
 	return true;
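
For reference, sugov_is_governor() used above checks whether schedutil
is the current governor of the given policy. A minimal sketch of such
a check, assuming schedutil's governor object is the schedutil_gov
instance from kernel/sched/cpufreq_schedutil.c:

/* Sketch: true if the policy is currently run by schedutil. */
bool sugov_is_governor(struct cpufreq_policy *policy)
{
	return policy->governor == &schedutil_gov;
}

With the new helper, this comparison now runs under the policy rwsem
(via guard(cpufreq_policy_read)), so policy->governor cannot change
while it is being evaluated.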