Message-Id: <20230520133249.22689-1-kweifat@gmail.com>
Date:   Sat, 20 May 2023 21:32:49 +0800
From:   Fieah Lim <kweifat@...il.com>
To:     srinivas.pandruvada@...ux.intel.com, lenb@...nel.org,
        rafael@...nel.org, linux-pm@...r.kernel.org,
        linux-kernel@...r.kernel.org
Cc:     Fieah Lim <kweifat@...il.com>
Subject: [PATCH] cpufreq: intel_pstate: Avoid initializing variables prematurely

The cpudata structures behind all_cpu_data are pretty large, so avoid
looking up and assigning the per-CPU pointer when a function may bail
out before actually using it.

The same idea applies to this_cpu in notify_hwp_interrupt() and
hwp_cap in intel_pstate_hwp_boost_up(), which are also initialized
prematurely. I think this also qualifies as a micro-optimization.

While at it, tidy up all of the cpu_data initializations for the sake
of consistency.
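
To illustrate, here is the pattern distilled into a standalone C
sketch; the types and names (table, driver_ready, handle_*) are
hypothetical stand-ins for illustration, not the driver's own code:

#include <stdbool.h>
#include <stdio.h>

struct cpudata {
	int cpu;
	bool valid;
};

static struct cpudata table[2] = { { 0, true }, { 1, false } };
static bool driver_ready;

/* Before: the lookup happens even when we bail out right away. */
static void handle_before(int idx)
{
	struct cpudata *cpu = &table[idx];	/* wasted if we return below */

	if (!driver_ready)
		return;

	printf("cpu %d valid=%d\n", cpu->cpu, cpu->valid);
}

/* After: declare first, initialize only once the early checks pass. */
static void handle_after(int idx)
{
	struct cpudata *cpu;

	if (!driver_ready)
		return;

	cpu = &table[idx];
	printf("cpu %d valid=%d\n", cpu->cpu, cpu->valid);
}

int main(void)
{
	handle_before(0);	/* no output: driver_ready is still false */
	driver_ready = true;
	handle_after(0);	/* prints "cpu 0 valid=1" */
	return 0;
}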

Signed-off-by: Fieah Lim <kweifat@...il.com>
---
 drivers/cpufreq/intel_pstate.c | 35 +++++++++++++++++++---------------
 1 file changed, 20 insertions(+), 15 deletions(-)

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2548ec92faa2..831769c39778 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -464,9 +464,8 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 
 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
 {
-	struct cpudata *cpu;
+	struct cpudata *cpu = all_cpu_data[policy->cpu];
 
-	cpu = all_cpu_data[policy->cpu];
 	if (!cpu->valid_pss_table)
 		return;
 
@@ -539,9 +538,8 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
 static inline void update_turbo_state(void)
 {
 	u64 misc_en;
-	struct cpudata *cpu;
+	struct cpudata *cpu = all_cpu_data[0];
 
-	cpu = all_cpu_data[0];
 	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
 	global.turbo_disabled =
 		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
@@ -769,7 +767,7 @@ static struct cpufreq_driver intel_pstate;
 static ssize_t store_energy_performance_preference(
 		struct cpufreq_policy *policy, const char *buf, size_t count)
 {
-	struct cpudata *cpu = all_cpu_data[policy->cpu];
+	struct cpudata *cpu;
 	char str_preference[21];
 	bool raw = false;
 	ssize_t ret;
@@ -802,6 +800,8 @@ static ssize_t store_energy_performance_preference(
 	if (!intel_pstate_driver)
 		return -EAGAIN;
 
+	cpu = all_cpu_data[policy->cpu];
+
 	mutex_lock(&intel_pstate_limits_lock);
 
 	if (intel_pstate_driver == &intel_pstate) {
@@ -1297,7 +1297,7 @@ static void update_qos_request(enum freq_qos_req_type type)
 	int i;
 
 	for_each_possible_cpu(i) {
-		struct cpudata *cpu = all_cpu_data[i];
+		struct cpudata *cpu;
 		unsigned int freq, perf_pct;
 
 		policy = cpufreq_cpu_get(i);
@@ -1310,6 +1310,8 @@ static void update_qos_request(enum freq_qos_req_type type)
 		if (!req)
 			continue;
 
+		cpu = all_cpu_data[i];
+
 		if (hwp_active)
 			intel_pstate_get_hwp_cap(cpu);
 
@@ -1579,7 +1581,7 @@ static cpumask_t hwp_intr_enable_mask;
 
 void notify_hwp_interrupt(void)
 {
-	unsigned int this_cpu = smp_processor_id();
+	unsigned int this_cpu;
 	struct cpudata *cpudata;
 	unsigned long flags;
 	u64 value;
@@ -1591,6 +1593,8 @@ void notify_hwp_interrupt(void)
 	if (!(value & 0x01))
 		return;
 
+	this_cpu = smp_processor_id();
+
 	spin_lock_irqsave(&hwp_notify_lock, flags);
 
 	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
@@ -2025,7 +2029,7 @@ static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
 static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
 {
 	u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
-	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
+	u64 hwp_cap;
 	u32 max_limit = (hwp_req & 0xff00) >> 8;
 	u32 min_limit = (hwp_req & 0xff);
 	u32 boost_level1;
@@ -2052,6 +2056,7 @@ static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
 		cpu->hwp_boost_min = min_limit;
 
 	/* level at half way mark between min and guranteed */
+	hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
 	boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;
 
 	if (cpu->hwp_boost_min < boost_level1)
@@ -2389,9 +2394,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
 
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
-	struct cpudata *cpu;
-
-	cpu = all_cpu_data[cpunum];
+	struct cpudata *cpu = all_cpu_data[cpunum];
 
 	if (!cpu) {
 		cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
@@ -2431,11 +2434,13 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 
 static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
 {
-	struct cpudata *cpu = all_cpu_data[cpu_num];
+	struct cpudata *cpu;
 
 	if (hwp_active && !hwp_boost)
 		return;
 
+	cpu = all_cpu_data[cpu_num];
+
 	if (cpu->update_util_set)
 		return;
 
@@ -2638,9 +2643,7 @@ static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy)
 
 static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
 {
-	struct cpudata *cpu = all_cpu_data[policy->cpu];
-
-	pr_debug("CPU %d going online\n", cpu->cpu);
+	pr_debug("CPU %d going online\n", policy->cpu);
 
 	intel_pstate_init_acpi_perf_limits(policy);
 
@@ -2649,6 +2652,8 @@ static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
 		 * Re-enable HWP and clear the "suspended" flag to let "resume"
 		 * know that it need not do that.
 		 */
+		struct cpudata *cpu = all_cpu_data[policy->cpu];
+
 		intel_pstate_hwp_reenable(cpu);
 		cpu->suspended = false;
 	}
-- 
2.40.1
