Message-ID: <bb09e7e8-5824-4bc1-9697-1929a4cf717e@amd.com>
Date: Tue, 2 Jul 2024 12:49:26 -0500
From: Mario Limonciello <mario.limonciello@....com>
To: Dhananjay Ugwekar <Dhananjay.Ugwekar@....com>, rafael@...nel.org,
viresh.kumar@...aro.org, gautham.shenoy@....com, perry.yuan@....com,
skhan@...uxfoundation.org, li.meng@....com, ray.huang@....com
Cc: linux-pm@...r.kernel.org, linux-kernel@...r.kernel.org,
David Arcari <darcari@...hat.com>
Subject: Re: [PATCH v2 2/2] cpufreq/amd-pstate: Fix the scaling_max_freq
setting on shared memory CPPC systems
On 7/2/2024 3:14, Dhananjay Ugwekar wrote:
> On shared memory CPPC systems, with amd_pstate=active mode, the change
> in scaling_max_freq doesn't get written to the shared memory
> region. Due to this, the writes to the scaling_max_freq sysfs file
> don't take effect. Fix this by propagating the scaling_max_freq
> changes to the shared memory region.
>
> Fixes: ffa5096a7c33 ("cpufreq: amd-pstate: implement Pstate EPP support for the AMD processors")
> Reported-by: David Arcari <darcari@...hat.com>
> Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@....com>
Reviewed-by: Mario Limonciello <mario.limonciello@....com>
> ---
> drivers/cpufreq/amd-pstate.c | 43 +++++++++++++++++++-----------------
> 1 file changed, 23 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
> index 9ad62dbe8bfb..a092b13ffbc2 100644
> --- a/drivers/cpufreq/amd-pstate.c
> +++ b/drivers/cpufreq/amd-pstate.c
> @@ -247,6 +247,26 @@ static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
>  	return index;
>  }
>
> +static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
> +			       u32 des_perf, u32 max_perf, bool fast_switch)
> +{
> +	if (fast_switch)
> +		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
> +	else
> +		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
> +			      READ_ONCE(cpudata->cppc_req_cached));
> +}
> +
> +DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
> +
> +static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
> +					  u32 min_perf, u32 des_perf,
> +					  u32 max_perf, bool fast_switch)
> +{
> +	static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
> +					    max_perf, fast_switch);
> +}
> +
>  static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
>  {
>  	int ret;
> @@ -263,6 +283,9 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
>  		if (!ret)
>  			cpudata->epp_cached = epp;
>  	} else {
> +		amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
> +				       cpudata->max_limit_perf, false);
> +
>  		perf_ctrls.energy_perf = epp;
>  		ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
>  		if (ret) {
> @@ -452,16 +475,6 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
>  	return static_call(amd_pstate_init_perf)(cpudata);
>  }
>
> -static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
> -			       u32 des_perf, u32 max_perf, bool fast_switch)
> -{
> -	if (fast_switch)
> -		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
> -	else
> -		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
> -			      READ_ONCE(cpudata->cppc_req_cached));
> -}
> -
>  static void cppc_update_perf(struct amd_cpudata *cpudata,
>  			     u32 min_perf, u32 des_perf,
>  			     u32 max_perf, bool fast_switch)
> @@ -475,16 +488,6 @@ static void cppc_update_perf(struct amd_cpudata *cpudata,
>  	cppc_set_perf(cpudata->cpu, &perf_ctrls);
>  }
>
> -DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
> -
> -static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
> -					  u32 min_perf, u32 des_perf,
> -					  u32 max_perf, bool fast_switch)
> -{
> -	static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
> -					    max_perf, fast_switch);
> -}
> -
>  static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
>  {
>  	u64 aperf, mperf, tsc;
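For context on the fix, here is a minimal sketch of the ordering the patch establishes for the shared-memory (non-MSR) path in active mode. It is illustrative only, not the driver code itself: the helper name set_epp_shmem() is made up, while cppc_set_perf(), cppc_set_epp_perf() and struct cppc_perf_ctrls are the ACPI CPPC interfaces the driver uses. The point is that the cached min/max limit perf values are written out via cppc_set_perf() before the EPP value is written with cppc_set_epp_perf(); before the patch, the else branch only wrote the EPP, so a scaling_max_freq change never reached the shared memory region.

/*
 * Illustrative sketch only. In the driver this logic is split across
 * amd_pstate_set_epp(), amd_pstate_update_perf() and cppc_update_perf()
 * in drivers/cpufreq/amd-pstate.c; set_epp_shmem() is a made-up name.
 */
static int set_epp_shmem(struct amd_cpudata *cpudata, u32 epp)
{
	struct cppc_perf_ctrls perf_ctrls = {};
	int ret;

	/* 1) Push the (possibly updated) perf limits to the shared memory region. */
	perf_ctrls.min_perf = cpudata->min_limit_perf;
	perf_ctrls.max_perf = cpudata->max_limit_perf;
	perf_ctrls.desired_perf = 0;	/* active mode: the platform picks the target */
	ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
	if (ret)
		return ret;

	/* 2) Only then write the energy/performance preference. */
	perf_ctrls.energy_perf = epp;
	return cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
}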