Message-ID: <3bbc34e2-d69c-3a1f-2afe-4564329a2c61@infradead.org>
Date:   Wed, 26 May 2021 10:52:19 -0700
From:   Randy Dunlap <rdunlap@...radead.org>
To:     "Rafael J. Wysocki" <rjw@...ysocki.net>,
        Linux PM <linux-pm@...r.kernel.org>
Cc:     LKML <linux-kernel@...r.kernel.org>,
        Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>,
        Chen Yu <yu.c.chen@...el.com>
Subject: Re: [PATCH v2] cpufreq: intel_pstate: hybrid: Fix build with
 CONFIG_ACPI unset

On 5/26/21 10:30 AM, Rafael J. Wysocki wrote:
> From: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
> 
> One of the previous commits introducing hybrid processor support to
> intel_pstate broke the build with CONFIG_ACPI unset.
> 
> Fix that and, while at it, make the empty stubs of two functions
> related to ACPI CPPC static inline, and fix a spelling mistake in the
> name of one of them.
> 
> Fixes: eb3693f0521e ("cpufreq: intel_pstate: hybrid: CPU-specific scaling factor")
> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
> Reported-by: Randy Dunlap <rdunlap@...radead.org>

Acked-by: Randy Dunlap <rdunlap@...radead.org> # build-tested


Thanks.
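For anyone wondering why the stubs become static inline: a plain static
function with an empty body triggers a defined-but-not-used warning in
configurations that never call it, while a static inline stub is silently
discarded by the compiler. A minimal sketch of the pattern (the shape of
what the patch does, not the exact kernel code):

	/*
	 * When CONFIG_ACPI_CPPC_LIB is not set, provide a static inline
	 * stub so callers still compile; unlike a plain static function,
	 * the unused inline stub produces no warning and no object code.
	 * -ENOTSUPP comes from <linux/errno.h> in kernel context.
	 */
	#ifndef CONFIG_ACPI_CPPC_LIB
	static inline int intel_pstate_get_cppc_guaranteed(int cpu)
	{
		return -ENOTSUPP;
	}
	#endif /* CONFIG_ACPI_CPPC_LIB */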

> ---
>  drivers/cpufreq/intel_pstate.c |   91 +++++++++++++++++++++--------------------
>  1 file changed, 48 insertions(+), 43 deletions(-)
> 
> Index: linux-pm/drivers/cpufreq/intel_pstate.c
> ===================================================================
> --- linux-pm.orig/drivers/cpufreq/intel_pstate.c
> +++ linux-pm/drivers/cpufreq/intel_pstate.c
> @@ -369,7 +369,7 @@ static void intel_pstate_set_itmt_prio(i
>  	}
>  }
>  
> -static int intel_pstate_get_cppc_guranteed(int cpu)
> +static int intel_pstate_get_cppc_guaranteed(int cpu)
>  {
>  	struct cppc_perf_caps cppc_perf;
>  	int ret;
> @@ -385,7 +385,7 @@ static int intel_pstate_get_cppc_gurante
>  }
>  
>  #else /* CONFIG_ACPI_CPPC_LIB */
> -static void intel_pstate_set_itmt_prio(int cpu)
> +static inline void intel_pstate_set_itmt_prio(int cpu)
>  {
>  }
>  #endif /* CONFIG_ACPI_CPPC_LIB */
> @@ -470,6 +470,20 @@ static void intel_pstate_exit_perf_limit
>  
>  	acpi_processor_unregister_performance(policy->cpu);
>  }
> +
> +static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
> +{
> +	return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
> +}
> +
> +static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
> +					struct cppc_perf_caps *caps)
> +{
> +	if (cppc_get_perf_caps(cpu->cpu, caps))
> +		return false;
> +
> +	return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
> +}
>  #else /* CONFIG_ACPI */
>  static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
>  {
> @@ -486,26 +500,12 @@ static inline bool intel_pstate_acpi_pm_
>  #endif /* CONFIG_ACPI */
>  
>  #ifndef CONFIG_ACPI_CPPC_LIB
> -static int intel_pstate_get_cppc_guranteed(int cpu)
> +static inline int intel_pstate_get_cppc_guaranteed(int cpu)
>  {
>  	return -ENOTSUPP;
>  }
>  #endif /* CONFIG_ACPI_CPPC_LIB */
>  
> -static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
> -{
> -	return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
> -}
> -
> -static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
> -					struct cppc_perf_caps *caps)
> -{
> -	if (cppc_get_perf_caps(cpu->cpu, caps))
> -		return false;
> -
> -	return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
> -}
> -
>  static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
>  {
>  	pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu);
> @@ -530,7 +530,6 @@ static void intel_pstate_hybrid_hwp_perf
>   */
>  static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
>  {
> -	struct cppc_perf_caps caps;
>  	int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
>  	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
>  	int perf_ctl_turbo = pstate_funcs.get_turbo();
> @@ -548,33 +547,39 @@ static void intel_pstate_hybrid_hwp_cali
>  	pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
>  	pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
>  
> -	if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
> -		if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
> -			pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);
> -
> -			/*
> -			 * If the CPPC nominal performance is valid, it can be
> -			 * assumed to correspond to cpu_khz.
> -			 */
> -			if (caps.nominal_perf == perf_ctl_max_phys) {
> -				intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
> -				return;
> -			}
> -			scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
> -		} else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
> -			pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);
> -
> -			/*
> -			 * If the CPPC guaranteed performance is valid, it can
> -			 * be assumed to correspond to max_freq.
> -			 */
> -			if (caps.guaranteed_perf == perf_ctl_max) {
> -				intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
> -				return;
> +#ifdef CONFIG_ACPI
> +	if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) {
> +		struct cppc_perf_caps caps;
> +
> +		if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
> +			if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
> +				pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);
> +
> +				/*
> +				 * If the CPPC nominal performance is valid, it
> +				 * can be assumed to correspond to cpu_khz.
> +				 */
> +				if (caps.nominal_perf == perf_ctl_max_phys) {
> +					intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
> +					return;
> +				}
> +				scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
> +			} else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
> +				pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);
> +
> +				/*
> +				 * If the CPPC guaranteed performance is valid,
> +				 * it can be assumed to correspond to max_freq.
> +				 */
> +				if (caps.guaranteed_perf == perf_ctl_max) {
> +					intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
> +					return;
> +				}
> +				scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
>  			}
> -			scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
>  		}
>  	}
> +#endif
>  	/*
>  	 * If using the CPPC data to compute the HWP-to-frequency scaling factor
>  	 * doesn't work, use the HWP_CAP gauranteed perf for this purpose with
> @@ -944,7 +949,7 @@ static ssize_t show_base_frequency(struc
>  	struct cpudata *cpu = all_cpu_data[policy->cpu];
>  	int ratio, freq;
>  
> -	ratio = intel_pstate_get_cppc_guranteed(policy->cpu);
> +	ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
>  	if (ratio <= 0) {
>  		u64 cap;
>  
> 
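A side note on the new #ifdef CONFIG_ACPI block above: using
if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) rather than a nested #ifdef keeps
the CPPC branch visible to the compiler in every CONFIG_ACPI build, so it
stays build-tested, while dead-code elimination removes it when
CONFIG_ACPI_CPPC_LIB=n. A minimal sketch of the idiom (the function name
here is hypothetical):

	#include <linux/kconfig.h>

	#ifdef CONFIG_ACPI
	static void example_hwp_calibrate(void)
	{
		/*
		 * IS_ENABLED() expands to a compile-time 0 or 1, so this
		 * branch is parsed and type-checked in all CONFIG_ACPI
		 * builds but optimized out when CONFIG_ACPI_CPPC_LIB=n.
		 */
		if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) {
			/* CPPC-based calibration would go here */
		}
	}
	#endif /* CONFIG_ACPI */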


-- 
~Randy
