Message-ID: <Z1awrnPHwTVMht4E@BLRRASHENOY1.amd.com>
Date: Mon, 9 Dec 2024 14:26:14 +0530
From: "Gautham R. Shenoy" <gautham.shenoy@....com>
To: Mario Limonciello <mario.limonciello@....com>
Cc: Perry Yuan <perry.yuan@....com>, linux-kernel@...r.kernel.org,
	linux-pm@...r.kernel.org,
	Dhananjay Ugwekar <Dhananjay.Ugwekar@....com>
Subject: Re: [PATCH v2 13/16] cpufreq/amd-pstate: Check if CPPC request has
 changed before writing to the MSR or shared memory

Hello Mario,


On Sun, Dec 08, 2024 at 12:30:28AM -0600, Mario Limonciello wrote:
> Move the common MSR field formatting code to msr_update_perf() from
> its callers.
> 
> Ensure that the MSR write is necessary before flushing a write out.
> Also drop the comparison from the passive flow tracing.
> 
> Reviewed-and-tested-by: Dhananjay Ugwekar <dhananjay.ugwekar@....com>
> Signed-off-by: Mario Limonciello <mario.limonciello@....com>
> ---

[..snip..]

> diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
> index dd11ba6c00cc3..2178931fbf87b 100644
> --- a/drivers/cpufreq/amd-pstate.c
> +++ b/drivers/cpufreq/amd-pstate.c
> @@ -224,15 +224,26 @@ static s16 shmem_get_epp(struct amd_cpudata *cpudata)
>  static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
>  			   u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
>  {
> -	u64 value;
> +	u64 value, prev;
> +
> +	value = prev = READ_ONCE(cpudata->cppc_req_cached);
> +
> +	value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
> +		   AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
> +	value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
> +	value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
> +	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
> +	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
> +
> +	if (value == prev)
> +		return 0;
>  
> -	value = READ_ONCE(cpudata->cppc_req_cached);
>  	if (fast_switch) {
> -		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
> +		wrmsrl(MSR_AMD_CPPC_REQ, value);
>  		return 0;
>  	} else {
> -		int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
> -					READ_ONCE(cpudata->cppc_req_cached));
> +		int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
> +
>  		if (ret)
>  			return ret;

Ok, so you are recomputing the value in this patch. Does it also make
sense to move the trace_amd_pstate_perf() call here, into
msr_update_perf()?
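
Something along these lines, perhaps? (Untested sketch, only to
illustrate the idea; it reuses the field masks and tracepoint arguments
from your patch, and fires the tracepoint only when the request has
actually changed, i.e. just before the MSR write.)

	static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
				   u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
	{
		u64 value, prev;

		value = prev = READ_ONCE(cpudata->cppc_req_cached);

		value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
			   AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
		value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
		value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
		value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
		value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);

		/* Nothing to do if the cached request already matches. */
		if (value == prev)
			return 0;

		/* Trace only the requests that actually reach the hardware. */
		if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata))
			trace_amd_pstate_perf(min_perf, des_perf, max_perf,
					      cpudata->freq, cpudata->cur.mperf,
					      cpudata->cur.aperf, cpudata->cur.tsc,
					      cpudata->cpu, fast_switch);

		/* Rest as in your patch: wrmsrl()/wrmsrl_on_cpu() and cache update. */
	}

That way the passive (amd_pstate_update) and EPP paths would not need
to duplicate the "did anything change" logic before tracing.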


--
Thanks and Regards
gautham.

>  	}
> @@ -528,9 +539,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
>  {
>  	unsigned long max_freq;
>  	struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
> -	u64 prev = READ_ONCE(cpudata->cppc_req_cached);
>  	u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
> -	u64 value = prev;
>  
>  	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
>  
> @@ -546,27 +555,14 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
>  	if (!cpudata->boost_supported)
>  		max_perf = min_t(unsigned long, nominal_perf, max_perf);
>  
> -	value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
> -		   AMD_CPPC_DES_PERF_MASK);
> -	value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
> -	value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
> -	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
> -
>  	if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
>  		trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
>  			cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
> -				cpudata->cpu, (value != prev), fast_switch);
> +				cpudata->cpu, fast_switch);
>  	}
>  
> -	if (value == prev)
> -		goto cpufreq_policy_put;
> -
> -	WRITE_ONCE(cpudata->cppc_req_cached, value);
> -
>  	amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
>  
> -cpufreq_policy_put:
> -
>  	cpufreq_cpu_put(policy);
>  }
>  
> @@ -1562,19 +1558,10 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
>  static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
>  {
>  	struct amd_cpudata *cpudata = policy->driver_data;
> -	u64 value;
>  	u32 epp;
>  
>  	amd_pstate_update_min_max_limit(policy);
>  
> -	value = READ_ONCE(cpudata->cppc_req_cached);
> -
> -	value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
> -		   AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
> -	value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, cpudata->max_limit_perf);
> -	value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, 0);
> -	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, cpudata->min_limit_perf);
> -
>  	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
>  		epp = 0;
>  	else
> -- 
> 2.43.0
> 
