Message-ID: <Z7NEO1lnH5/BpnF1@BLRRASHENOY1.amd.com>
Date: Mon, 17 Feb 2025 19:44:19 +0530
From: "Gautham R. Shenoy" <gautham.shenoy@....com>
To: Mario Limonciello <superm1@...nel.org>
Cc: Perry Yuan <perry.yuan@....com>,
	Dhananjay Ugwekar <Dhananjay.Ugwekar@....com>,
	"open list:X86 ARCHITECTURE (32-BIT AND 64-BIT)" <linux-kernel@...r.kernel.org>,
	"open list:CPU FREQUENCY SCALING FRAMEWORK" <linux-pm@...r.kernel.org>,
	Mario Limonciello <mario.limonciello@....com>
Subject: Re: [PATCH v2 13/17] cpufreq/amd-pstate: Move all EPP tracing into
 *_update_perf and *_set_epp functions

On Fri, Feb 14, 2025 at 06:52:40PM -0600, Mario Limonciello wrote:
> From: Mario Limonciello <mario.limonciello@....com>
> 
> The EPP tracing is done by the caller today, but this precludes recording
> whether the CPPC request has actually changed.
> 
> Move it into the update_perf and set_epp functions and include information
> about whether the request has changed from the last one.
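
To make the new flow concrete, this is the shape the update path takes
after the patch (condensed from the diff below; the shmem variant and
error handling are elided):

	if (trace_amd_pstate_epp_perf_enabled()) {
		union perf_cached perf = READ_ONCE(cpudata->perf);

		/* The tracepoint now fires before the early return, so
		 * requests that turn out to be no-ops are traced too,
		 * with "changed" telling the two cases apart.
		 */
		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
					  epp, min_perf, max_perf,
					  policy->boost_enabled,
					  value != prev);
	}

	if (value == prev)
		return 0;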

A consequential change is that amd_pstate_update_perf() and
amd_pstate_set_epp() now take a cpufreq_policy parameter in place of the
cpudata parameter, since policy->boost_enabled is needed by the
tracepoint. Can you please add this to the commit log?
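
For reference, the before/after prototypes (taken from the diff; the
cpudata pointer is now recovered via policy->driver_data inside each
function):

	/* before */
	static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
				   u8 des_perf, u8 max_perf, u8 epp, bool fast_switch);
	static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp);

	/* after */
	static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
				   u8 des_perf, u8 max_perf, u8 epp, bool fast_switch);
	static int msr_set_epp(struct cpufreq_policy *policy, u8 epp);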


Otherwise, the patch looks good to me.

Reviewed-by: Gautham R. Shenoy <gautham.shenoy@....com>


-- 
Thanks and Regards
gautham.


> 
> Reviewed-by: Dhananjay Ugwekar <dhananjay.ugwekar@....com>
> Signed-off-by: Mario Limonciello <mario.limonciello@....com>
> ---
>  drivers/cpufreq/amd-pstate-trace.h |  13 +++-
>  drivers/cpufreq/amd-pstate.c       | 119 +++++++++++++++++------------
>  2 files changed, 81 insertions(+), 51 deletions(-)
> 
> diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
> index f457d4af2c62e..32e1bdc588c52 100644
> --- a/drivers/cpufreq/amd-pstate-trace.h
> +++ b/drivers/cpufreq/amd-pstate-trace.h
> @@ -90,7 +90,8 @@ TRACE_EVENT(amd_pstate_epp_perf,
>  		 u8 epp,
>  		 u8 min_perf,
>  		 u8 max_perf,
> -		 bool boost
> +		 bool boost,
> +		 bool changed
>  		 ),
>  
>  	TP_ARGS(cpu_id,
> @@ -98,7 +99,8 @@ TRACE_EVENT(amd_pstate_epp_perf,
>  		epp,
>  		min_perf,
>  		max_perf,
> -		boost),
> +		boost,
> +		changed),
>  
>  	TP_STRUCT__entry(
>  		__field(unsigned int, cpu_id)
> @@ -107,6 +109,7 @@ TRACE_EVENT(amd_pstate_epp_perf,
>  		__field(u8, min_perf)
>  		__field(u8, max_perf)
>  		__field(bool, boost)
> +		__field(bool, changed)
>  		),
>  
>  	TP_fast_assign(
> @@ -116,15 +119,17 @@ TRACE_EVENT(amd_pstate_epp_perf,
>  		__entry->min_perf = min_perf;
>  		__entry->max_perf = max_perf;
>  		__entry->boost = boost;
> +		__entry->changed = changed;
>  		),
>  
> -	TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u",
> +	TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u, changed=%u",
>  		  (unsigned int)__entry->cpu_id,
>  		  (u8)__entry->min_perf,
>  		  (u8)__entry->max_perf,
>  		  (u8)__entry->highest_perf,
>  		  (u8)__entry->epp,
> -		  (bool)__entry->boost
> +		  (bool)__entry->boost,
> +		  (bool)__entry->changed
>  		 )
>  );
>  
> diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
> index 9517da9b7e692..1304bdc23e809 100644
> --- a/drivers/cpufreq/amd-pstate.c
> +++ b/drivers/cpufreq/amd-pstate.c
> @@ -228,9 +228,10 @@ static u8 shmem_get_epp(struct amd_cpudata *cpudata)
>  	return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp);
>  }
>  
> -static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
> +static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
>  			   u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
>  {
> +	struct amd_cpudata *cpudata = policy->driver_data;
>  	u64 value, prev;
>  
>  	value = prev = READ_ONCE(cpudata->cppc_req_cached);
> @@ -242,6 +243,18 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
>  	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
>  	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
>  
> +	if (trace_amd_pstate_epp_perf_enabled()) {
> +		union perf_cached perf = READ_ONCE(cpudata->perf);
> +
> +		trace_amd_pstate_epp_perf(cpudata->cpu,
> +					  perf.highest_perf,
> +					  epp,
> +					  min_perf,
> +					  max_perf,
> +					  policy->boost_enabled,
> +					  value != prev);
> +	}
> +
>  	if (value == prev)
>  		return 0;
>  
> @@ -256,24 +269,26 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
>  	}
>  
>  	WRITE_ONCE(cpudata->cppc_req_cached, value);
> -	WRITE_ONCE(cpudata->epp_cached, epp);
> +	if (epp != cpudata->epp_cached)
> +		WRITE_ONCE(cpudata->epp_cached, epp);
>  
>  	return 0;
>  }
>  
>  DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
>  
> -static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
> +static inline int amd_pstate_update_perf(struct cpufreq_policy *policy,
>  					  u8 min_perf, u8 des_perf,
>  					  u8 max_perf, u8 epp,
>  					  bool fast_switch)
>  {
> -	return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
> +	return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf,
>  						   max_perf, epp, fast_switch);
>  }
>  
> -static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
> +static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
>  {
> +	struct amd_cpudata *cpudata = policy->driver_data;
>  	u64 value, prev;
>  	int ret;
>  
> @@ -281,6 +296,19 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
>  	value &= ~AMD_CPPC_EPP_PERF_MASK;
>  	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
>  
> +	if (trace_amd_pstate_epp_perf_enabled()) {
> +		union perf_cached perf = cpudata->perf;
> +
> +		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
> +					  epp,
> +					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
> +						    cpudata->cppc_req_cached),
> +					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
> +						    cpudata->cppc_req_cached),
> +					  policy->boost_enabled,
> +					  value != prev);
> +	}
> +
>  	if (value == prev)
>  		return 0;
>  
> @@ -299,15 +327,29 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
>  
>  DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
>  
> -static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u8 epp)
> +static inline int amd_pstate_set_epp(struct cpufreq_policy *policy, u8 epp)
>  {
> -	return static_call(amd_pstate_set_epp)(cpudata, epp);
> +	return static_call(amd_pstate_set_epp)(policy, epp);
>  }
>  
> -static int shmem_set_epp(struct amd_cpudata *cpudata, u8 epp)
> +static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
>  {
> -	int ret;
> +	struct amd_cpudata *cpudata = policy->driver_data;
>  	struct cppc_perf_ctrls perf_ctrls;
> +	int ret;
> +
> +	if (trace_amd_pstate_epp_perf_enabled()) {
> +		union perf_cached perf = cpudata->perf;
> +
> +		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
> +					  epp,
> +					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
> +						    cpudata->cppc_req_cached),
> +					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
> +						    cpudata->cppc_req_cached),
> +					  policy->boost_enabled,
> +					  epp != cpudata->epp_cached);
> +	}
>  
>  	if (epp == cpudata->epp_cached)
>  		return 0;
> @@ -339,17 +381,7 @@ static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
>  		return -EBUSY;
>  	}
>  
> -	if (trace_amd_pstate_epp_perf_enabled()) {
> -		union perf_cached perf = READ_ONCE(cpudata->perf);
> -
> -		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
> -					  epp,
> -					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
> -					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
> -					  policy->boost_enabled);
> -	}
> -
> -	return amd_pstate_set_epp(cpudata, epp);
> +	return amd_pstate_set_epp(policy, epp);
>  }
>  
>  static inline int msr_cppc_enable(bool enable)
> @@ -492,15 +524,16 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
>  	return static_call(amd_pstate_init_perf)(cpudata);
>  }
>  
> -static int shmem_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
> +static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
>  			     u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
>  {
> +	struct amd_cpudata *cpudata = policy->driver_data;
>  	struct cppc_perf_ctrls perf_ctrls;
>  	u64 value, prev;
>  	int ret;
>  
>  	if (cppc_state == AMD_PSTATE_ACTIVE) {
> -		int ret = shmem_set_epp(cpudata, epp);
> +		int ret = shmem_set_epp(policy, epp);
>  
>  		if (ret)
>  			return ret;
> @@ -515,6 +548,18 @@ static int shmem_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
>  	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
>  	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
>  
> +	if (trace_amd_pstate_epp_perf_enabled()) {
> +		union perf_cached perf = READ_ONCE(cpudata->perf);
> +
> +		trace_amd_pstate_epp_perf(cpudata->cpu,
> +					  perf.highest_perf,
> +					  epp,
> +					  min_perf,
> +					  max_perf,
> +					  policy->boost_enabled,
> +					  value != prev);
> +	}
> +
>  	if (value == prev)
>  		return 0;
>  
> @@ -592,7 +637,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
>  				cpudata->cpu, fast_switch);
>  	}
>  
> -	amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
> +	amd_pstate_update_perf(policy, min_perf, des_perf, max_perf, 0, fast_switch);
>  }
>  
>  static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
> @@ -1527,7 +1572,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
>  			return ret;
>  		WRITE_ONCE(cpudata->cppc_req_cached, value);
>  	}
> -	ret = amd_pstate_set_epp(cpudata, cpudata->epp_default);
> +	ret = amd_pstate_set_epp(policy, cpudata->epp_default);
>  	if (ret)
>  		return ret;
>  
> @@ -1568,14 +1613,8 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
>  		epp = READ_ONCE(cpudata->epp_cached);
>  
>  	perf = READ_ONCE(cpudata->perf);
> -	if (trace_amd_pstate_epp_perf_enabled()) {
> -		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf, epp,
> -					  perf.min_limit_perf,
> -					  perf.max_limit_perf,
> -					  policy->boost_enabled);
> -	}
>  
> -	return amd_pstate_update_perf(cpudata, perf.min_limit_perf, 0U,
> +	return amd_pstate_update_perf(policy, perf.min_limit_perf, 0U,
>  				      perf.max_limit_perf, epp, false);
>  }
>  
> @@ -1615,14 +1654,7 @@ static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
>  	if (ret)
>  		pr_err("failed to enable amd pstate during resume, return %d\n", ret);
>  
> -	if (trace_amd_pstate_epp_perf_enabled()) {
> -		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
> -					  cpudata->epp_cached,
> -					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
> -					  perf.highest_perf, policy->boost_enabled);
> -	}
> -
> -	return amd_pstate_update_perf(cpudata, 0, 0, perf.highest_perf, cpudata->epp_cached, false);
> +	return amd_pstate_update_perf(policy, 0, 0, perf.highest_perf, cpudata->epp_cached, false);
>  }
>  
>  static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
> @@ -1648,14 +1680,7 @@ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
>  	if (cpudata->suspended)
>  		return 0;
>  
> -	if (trace_amd_pstate_epp_perf_enabled()) {
> -		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
> -					  AMD_CPPC_EPP_BALANCE_POWERSAVE,
> -					  perf.lowest_perf, perf.lowest_perf,
> -					  policy->boost_enabled);
> -	}
> -
> -	return amd_pstate_update_perf(cpudata, perf.lowest_perf, 0, perf.lowest_perf,
> +	return amd_pstate_update_perf(policy, perf.lowest_perf, 0, perf.lowest_perf,
>  				      AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
>  }
>  
> -- 
> 2.43.0
> 
