From 380d5a340ebeb172c93a878fd84a12e7bfea9cff Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Fri, 3 Sep 2021 10:41:35 -0700 Subject: [TEST PATCH] cpufreq: intel_pstate: Fix for HWP interrupt before driver is ready On an X1 Gen9 laptop, an HWP interrupt arrives before the driver is ready to handle it on that CPU. Here the firmware enables and sends an interrupt for a guaranteed-performance change. Since the driver has not initialized cpudata yet, this causes a NULL pointer dereference when trying to schedule processing of the interrupt in a workqueue. To avoid this, maintain a cpumask of CPUs for which the driver has initialized interrupt handling. If a CPU is not in the mask, simply clear the HWP status. Since the same thing may happen during S3 resume, clear the CPU's bit in the mask during offline and set it again during online. Signed-off-by: Srinivas Pandruvada --- drivers/cpufreq/intel_pstate.c | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b4ffe6c8a0d0..5ac86bfa1080 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -298,6 +298,8 @@ static bool hwp_boost __read_mostly; static struct cpufreq_driver *intel_pstate_driver __read_mostly; +static cpumask_t hwp_intr_enable_mask; + #ifdef CONFIG_ACPI static bool acpi_ppc; #endif @@ -1067,11 +1069,15 @@ static void intel_pstate_hwp_set(unsigned int cpu) wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); } +static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata); + static void intel_pstate_hwp_offline(struct cpudata *cpu) { u64 value = READ_ONCE(cpu->hwp_req_cached); int min_perf; + intel_pstate_disable_hwp_interrupt(cpu); + if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { /* * In case the EPP has been set to "performance" by the @@ -1645,20 +1651,35 @@ void notify_hwp_interrupt(void) if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) return; - rdmsrl(MSR_HWP_STATUS, value); + rdmsrl_safe(MSR_HWP_STATUS, &value); if (!(value & 0x01)) 
return; + if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask)) { + wrmsrl_safe(MSR_HWP_STATUS, 0); + return; + } + cpudata = all_cpu_data[this_cpu]; schedule_delayed_work_on(this_cpu, &cpudata->hwp_notify_work, msecs_to_jiffies(10)); } +static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) +{ + + if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask)) { + wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); + cancel_delayed_work_sync(&cpudata->hwp_notify_work); + } +} + static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) { /* Enable HWP notification interrupt for guaranteed performance change */ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) { INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work); wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01); + cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask); } } -- 2.17.1