Message-Id: <1256ee94a515216ab58553181de175cc74f396bd.1623313323.git.viresh.kumar@linaro.org>
Date:   Thu, 10 Jun 2021 13:54:01 +0530
From:   Viresh Kumar <viresh.kumar@...aro.org>
To:     Rafael Wysocki <rjw@...ysocki.net>,
        Qian Cai <quic_qiancai@...cinc.com>,
        Viresh Kumar <viresh.kumar@...aro.org>
Cc:     linux-pm@...r.kernel.org,
        Vincent Guittot <vincent.guittot@...aro.org>,
        Ionela Voinescu <ionela.voinescu@....com>,
        linux-kernel@...r.kernel.org
Subject: [PATCH 5/5] cpufreq: cppc: Fix suspend/resume specific races with the FIE code

The CPPC driver currently stops the frequency-invariance related
kthread_work and irq_work from cppc_freq_invariance_exit(), which is
only called during the driver's removal.

This is not sufficient, as CPUs can get hot-plugged out while the
driver is in use; the same happens during system suspend/resume.

In such cases we can reach a state where a CPU has been removed by the
kernel, but its kthread_work or irq_work isn't stopped.
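
Concretely, each departing CPU needs its pending FIE work flushed
before the CPU goes away. A minimal sketch of that per-CPU cleanup
(this mirrors what the new stop_cpu() callback below does):

	struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, cpu);

	/* Keep the scheduler tick from queuing more FIE work on this CPU */
	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, cpumask_of(cpu));

	/* Flush/cancel work that may already be queued or running */
	irq_work_sync(&cppc_fi->irq_work);
	kthread_cancel_work_sync(&cppc_fi->work);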

Fix this by implementing the start_cpu() and stop_cpu() callbacks of
the cpufreq core, which are invoked for each CPU as it is added to or
removed from a policy.
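
The wiring itself is just two function pointers on the driver,
assigned at runtime because they are only needed when FIE is active
(the ->start_cpu()/->stop_cpu() hooks themselves are assumed to come
from the earlier patches of this series):

	/* Called by the cpufreq core as CPUs enter/leave the policy */
	cppc_cpufreq_driver.start_cpu = cppc_cpufreq_start_cpu;
	cppc_cpufreq_driver.stop_cpu = cppc_cpufreq_stop_cpu;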

The FIE feature was marked BROKEN earlier; revert that.

Reported-by: Qian Cai <quic_qiancai@...cinc.com>
Signed-off-by: Viresh Kumar <viresh.kumar@...aro.org>
---
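Note: the ordering around driver (un)registration matters here. Since
->start_cpu() can be invoked as soon as the driver is registered, the
FIE infrastructure must be set up before cpufreq_register_driver() and,
on the way out, torn down only after cpufreq_unregister_driver() has
stopped the CPUs. Condensed from the last two hunks of this patch:

	static int __init cppc_cpufreq_init(void)
	{
		...
		cppc_freq_invariance_init();	/* kworker + callbacks ready first */

		ret = cpufreq_register_driver(&cppc_cpufreq_driver);
		if (ret)
			cppc_freq_invariance_exit();	/* undo on failure */

		return ret;
	}

	static void __exit cppc_cpufreq_exit(void)
	{
		cpufreq_unregister_driver(&cppc_cpufreq_driver);
		cppc_freq_invariance_exit();
		...
	}
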
 drivers/cpufreq/Kconfig.arm    |   1 -
 drivers/cpufreq/cppc_cpufreq.c | 117 +++++++++++++++++++--------------
 2 files changed, 68 insertions(+), 50 deletions(-)

diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 614c34350f41..a5c5f70acfc9 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -22,7 +22,6 @@ config ACPI_CPPC_CPUFREQ
 config ACPI_CPPC_CPUFREQ_FIE
 	bool "Frequency Invariance support for CPPC cpufreq driver"
 	depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
-	depends on BROKEN
 	default y
 	help
 	  This extends frequency invariance support in the CPPC cpufreq driver,
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 30a861538784..82167c657098 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -74,7 +74,6 @@ struct cppc_freq_invariance {
 
 static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
 static struct kthread_worker *kworker_fie;
-static bool fie_disabled;
 
 static struct cpufreq_driver cppc_cpufreq_driver;
 static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
@@ -151,35 +150,64 @@ static struct scale_freq_data cppc_sftd = {
 	.set_freq_scale = cppc_scale_freq_tick,
 };
 
-static void cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
-					     struct cppc_cpudata *cpu_data)
+static void cppc_cpufreq_start_cpu(struct cpufreq_policy *policy,
+				   unsigned int cpu)
 {
+	struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, cpu);
 	struct cppc_perf_fb_ctrs fb_ctrs = {0};
-	struct cppc_freq_invariance *cppc_fi;
-	int i, ret;
+	int ret;
 
-	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
-		return;
+	cppc_fi->cpu = cpu;
+	cppc_fi->cpu_data = policy->driver_data;
+	kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
+	init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
 
-	if (fie_disabled)
+	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs);
+	if (ret) {
+		pr_warn("%s: failed to read perf counters: %d\n",
+			__func__, ret);
+		return;
+	}
+
+	cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
 
-	for_each_cpu(i, policy->cpus) {
-		cppc_fi = &per_cpu(cppc_freq_inv, i);
-		cppc_fi->cpu = i;
-		cppc_fi->cpu_data = cpu_data;
-		kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
-		init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
+	/* Register for freq-invariance */
+	topology_set_scale_freq_source(&cppc_sftd, cpumask_of(cpu));
+}
 
-		ret = cppc_get_perf_ctrs(i, &fb_ctrs);
-		if (ret) {
-			pr_warn("%s: failed to read perf counters: %d\n",
-				__func__, ret);
-			fie_disabled = true;
-		} else {
-			cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
-		}
-	}
+static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy,
+				  unsigned int cpu)
+{
+	struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, cpu);
+
+	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, cpumask_of(cpu));
+
+	irq_work_sync(&cppc_fi->irq_work);
+	kthread_cancel_work_sync(&cppc_fi->work);
+}
+
+static int cppc_freq_invariance_policy_init(struct cpufreq_policy *policy)
+{
+	int cpu;
+
+	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+		return 0;
+
+	for_each_cpu(cpu, policy->cpus)
+		cppc_cpufreq_start_cpu(policy, cpu);
+
+	return 0;
+}
+
+static void cppc_freq_invariance_policy_exit(struct cpufreq_policy *policy)
+{
+	int cpu;
+
+	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+		return;
+
+	for_each_cpu(cpu, policy->cpus)
+		cppc_cpufreq_stop_cpu(policy, cpu);
 }
 
 static void __init cppc_freq_invariance_init(void)
@@ -202,9 +230,6 @@ static void __init cppc_freq_invariance_init(void)
 	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
 		return;
 
-	if (fie_disabled)
-		return;
-
 	kworker_fie = kthread_create_worker(0, "cppc_fie");
 	if (IS_ERR(kworker_fie))
 		return;
@@ -217,36 +242,28 @@ static void __init cppc_freq_invariance_init(void)
 		return;
 	}
 
-	/* Register for freq-invariance */
-	topology_set_scale_freq_source(&cppc_sftd, cpu_present_mask);
+	cppc_cpufreq_driver.start_cpu = cppc_cpufreq_start_cpu;
+	cppc_cpufreq_driver.stop_cpu = cppc_cpufreq_stop_cpu;
 }
 
 static void cppc_freq_invariance_exit(void)
 {
-	struct cppc_freq_invariance *cppc_fi;
-	int i;
-
 	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
 		return;
 
-	if (fie_disabled)
-		return;
-
-	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, cpu_present_mask);
-
-	for_each_possible_cpu(i) {
-		cppc_fi = &per_cpu(cppc_freq_inv, i);
-		irq_work_sync(&cppc_fi->irq_work);
-	}
-
 	kthread_destroy_worker(kworker_fie);
 	kworker_fie = NULL;
 }
 
 #else
+static inline int
cppc_freq_invariance_policy_init(struct cpufreq_policy *policy)
+{
+	return 0;
+}
+
 static inline void
-cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
-				 struct cppc_cpudata *cpu_data)
+cppc_freq_invariance_policy_exit(struct cpufreq_policy *policy)
 {
 }
 
@@ -529,11 +546,10 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (ret) {
 		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
 			 caps->highest_perf, cpu, ret);
-	} else {
-		cppc_freq_invariance_policy_init(policy, cpu_data);
+		return ret;
 	}
 
-	return ret;
+	return cppc_freq_invariance_policy_init(policy);
 }
 
 static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
@@ -543,6 +559,8 @@ static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 	unsigned int cpu = policy->cpu;
 	int ret;
 
+	cppc_freq_invariance_policy_exit(policy);
+
 	cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;
 
 	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
@@ -728,10 +746,11 @@ static int __init cppc_cpufreq_init(void)
 	INIT_LIST_HEAD(&cpu_data_list);
 
 	cppc_check_hisi_workaround();
+	cppc_freq_invariance_init();
 
 	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
-	if (!ret)
-		cppc_freq_invariance_init();
+	if (ret)
+		cppc_freq_invariance_exit();
 
 	return ret;
 }
@@ -750,8 +769,8 @@ static inline void free_cpu_data(void)
 
 static void __exit cppc_cpufreq_exit(void)
 {
-	cppc_freq_invariance_exit();
 	cpufreq_unregister_driver(&cppc_cpufreq_driver);
+	cppc_freq_invariance_exit();
 
 	free_cpu_data();
 }
-- 
2.31.1.272.g89b43f80a514
