[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <fb876f94-f92e-4c8a-9b64-fe9118a15595@nvidia.com>
Date: Thu, 8 Jan 2026 19:51:01 +0530
From: Sumit Gupta <sumitg@...dia.com>
To: "zhenglifeng (A)" <zhenglifeng1@...wei.com>, rafael@...nel.org,
viresh.kumar@...aro.org, lenb@...nel.org, robert.moore@...el.com,
corbet@....net, pierre.gondois@....com, rdunlap@...radead.org,
ray.huang@....com, gautham.shenoy@....com, mario.limonciello@....com,
perry.yuan@....com, ionela.voinescu@....com, zhanjie9@...ilicon.com,
linux-pm@...r.kernel.org, linux-acpi@...r.kernel.org,
linux-doc@...r.kernel.org, acpica-devel@...ts.linux.dev,
linux-kernel@...r.kernel.org
Cc: linux-tegra@...r.kernel.org, treding@...dia.com, jonathanh@...dia.com,
vsethi@...dia.com, ksitaraman@...dia.com, sanjayc@...dia.com,
nhartman@...dia.com, bbasu@...dia.com, sumitg@...dia.com
Subject: Re: [PATCH v5 09/11] cpufreq: CPPC: sync policy limits when toggling
auto_select
On 26/12/25 08:25, zhenglifeng (A) wrote:
> External email: Use caution opening links or attachments
>
>
> On 2025/12/23 20:13, Sumit Gupta wrote:
>> When CPPC autonomous selection (auto_select) is enabled or disabled,
>> the policy min/max frequency limits should be updated appropriately to
>> reflect the new operating mode.
>>
>> Currently, toggling auto_select only changes the hardware register but
>> doesn't update the cpufreq policy constraints, which can lead to
>> inconsistent behavior between the hardware state and the policy limits
>> visible to userspace.
>>
>> Add cppc_cpufreq_update_autosel_config() function to handle the
>> auto_select toggle by syncing min/max_perf values with policy
>> constraints. When enabling auto_sel, restore preserved min/max_perf
>> values to policy limits. When disabling, reset policy to defaults
>> while preserving hardware register values for later use.
>>
>> Signed-off-by: Sumit Gupta <sumitg@...dia.com>
>> ---
>> drivers/cpufreq/cppc_cpufreq.c | 112 +++++++++++++++++++++++++++------
>> 1 file changed, 92 insertions(+), 20 deletions(-)
>>
>> diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
>> index 0202c7b823e6..b1f570d6de34 100644
>> --- a/drivers/cpufreq/cppc_cpufreq.c
>> +++ b/drivers/cpufreq/cppc_cpufreq.c
>> @@ -544,14 +544,20 @@ static void populate_efficiency_class(void)
>> * cppc_cpufreq_set_mperf_limit - Set min/max performance limit
>> * @policy: cpufreq policy
>> * @val: performance value to set
>> + * @update_reg: whether to update hardware register
>> * @update_policy: whether to update policy constraints
>> * @is_min: true for min_perf, false for max_perf
>> *
>> + * When @update_reg is true, writes to HW registers and preserves values.
>> * When @update_policy is true, updates cpufreq policy frequency limits.
>> + *
>> + * @update_reg is false when disabling auto_sel to preserve HW values.
>> + * The preserved value is used on next enabling of the autonomous mode.
>> * @update_policy is false during cpu_init when policy isn't fully set up.
>> */
>> static int cppc_cpufreq_set_mperf_limit(struct cpufreq_policy *policy, u64 val,
>> - bool update_policy, bool is_min)
>> + bool update_reg, bool update_policy,
>> + bool is_min)
>> {
>> struct cppc_cpudata *cpu_data = policy->driver_data;
>> struct cppc_perf_caps *caps = &cpu_data->perf_caps;
>> @@ -563,19 +569,22 @@ static int cppc_cpufreq_set_mperf_limit(struct cpufreq_policy *policy, u64 val,
>>
>> perf = clamp(val, caps->lowest_perf, caps->highest_perf);
>>
>> - ret = is_min ? cppc_set_min_perf(cpu, perf) :
>> - cppc_set_max_perf(cpu, perf);
>> - if (ret) {
>> - if (ret != -EOPNOTSUPP)
>> - pr_warn("Failed to set %s_perf (%llu) on CPU%d (%d)\n",
>> - is_min ? "min" : "max", (u64)perf, cpu, ret);
>> - return ret;
>> - }
>> + if (update_reg) {
>> + ret = is_min ? cppc_set_min_perf(cpu, perf) :
>> + cppc_set_max_perf(cpu, perf);
>> + if (ret) {
>> + if (ret != -EOPNOTSUPP)
>> + pr_warn("CPU%d: set %s_perf=%llu failed (%d)\n",
>> + cpu, is_min ? "min" : "max",
>> + (u64)perf, ret);
>> + return ret;
>> + }
>>
>> - if (is_min)
>> - cpu_data->perf_ctrls.min_perf = perf;
>> - else
>> - cpu_data->perf_ctrls.max_perf = perf;
>> + if (is_min)
>> + cpu_data->perf_ctrls.min_perf = perf;
>> + else
>> + cpu_data->perf_ctrls.max_perf = perf;
>> + }
>>
>> if (update_policy) {
>> freq = cppc_perf_to_khz(caps, perf);
>> @@ -592,11 +601,74 @@ static int cppc_cpufreq_set_mperf_limit(struct cpufreq_policy *policy, u64 val,
>> return 0;
>> }
>>
>> -#define cppc_cpufreq_set_min_perf(policy, val, update_policy) \
>> - cppc_cpufreq_set_mperf_limit(policy, val, update_policy, true)
>> +#define cppc_cpufreq_set_min_perf(policy, val, update_reg, update_policy) \
>> + cppc_cpufreq_set_mperf_limit(policy, val, update_reg, update_policy, \
>> + true)
>> +
>> +#define cppc_cpufreq_set_max_perf(policy, val, update_reg, update_policy) \
>> + cppc_cpufreq_set_mperf_limit(policy, val, update_reg, update_policy, \
>> + false)
>> +
>> +/**
>> + * cppc_cpufreq_update_autosel_config - Update autonomous selection config
>> + * @policy: cpufreq policy
>> + * @is_auto_sel: enable/disable autonomous selection
>> + *
>> + * Return: 0 on success, negative error code on failure
>> + */
>> +static int cppc_cpufreq_update_autosel_config(struct cpufreq_policy *policy,
>> + bool is_auto_sel)
>> +{
>> + struct cppc_cpudata *cpu_data = policy->driver_data;
>> + struct cppc_perf_caps *caps = &cpu_data->perf_caps;
>> + u64 min_perf = caps->lowest_nonlinear_perf;
>> + u64 max_perf = caps->nominal_perf;
>> + unsigned int cpu = policy->cpu;
>> + bool update_reg = is_auto_sel;
>> + bool update_policy = true;
>> + int ret;
>> +
>> + guard(mutex)(&cppc_cpufreq_update_autosel_config_lock);
>> +
>> + if (is_auto_sel) {
>> + /* Use preserved values if available, else use defaults */
>> + if (cpu_data->perf_ctrls.min_perf)
>> + min_perf = cpu_data->perf_ctrls.min_perf;
>> + if (cpu_data->perf_ctrls.max_perf)
>> + max_perf = cpu_data->perf_ctrls.max_perf;
>> + }
> So if !is_auto_sel, min_perf and max_perf reg will be set to
> lowest_nonlinear_perf and nominal_perf, but perf_ctrls.min_perf and
> perf_ctrls.max_perf remain the old value. A little bit strange, I think. And
> when this happens, min_freq_req and max_freq_req will retain the value last
> set by the users through min_perf and max_perf. Is that alright?
When disabling: Reset policy to defaults for normal governor control,
but preserve HW min/max_perf values and cached values for when
auto_sel is re-enabled.
When enabling: Restore policy to preserved min/max_perf values.
>> +
>> + /*
>> + * Set min/max performance and update policy constraints.
>> + * When enabling: update both HW registers and policy.
>> + * When disabling: update policy only, preserve HW registers.
>> + * Continue even if min/max are not supported, as EPP and autosel
>> + * might still be supported.
>> + */
>> + ret = cppc_cpufreq_set_min_perf(policy, min_perf, update_reg,
>> + update_policy);
>> + if (ret && ret != -EOPNOTSUPP)
>> + return ret;
>> +
>> + ret = cppc_cpufreq_set_max_perf(policy, max_perf, update_reg,
>> + update_policy);
>> + if (ret && ret != -EOPNOTSUPP)
>> + return ret;
>> +
>> + /* Update auto_sel register */
>> + ret = cppc_set_auto_sel(cpu, is_auto_sel);
>> + if (ret && ret != -EOPNOTSUPP) {
>> + pr_warn("Failed to set auto_sel=%d for CPU%d (%d)\n",
>> + is_auto_sel, cpu, ret);
>> + return ret;
>> + }
>> + if (!ret)
>> + cpu_data->perf_ctrls.auto_sel = is_auto_sel;
>> +
>> + return 0;
> Better to return ret.
Here, return 0 is intentional.
If cppc_set_auto_sel() returns -EOPNOTSUPP, we still consider the
function successful since auto_sel is an optional register.
>> +}
>> +
>>
>> -#define cppc_cpufreq_set_max_perf(policy, val, update_policy) \
>> - cppc_cpufreq_set_mperf_limit(policy, val, update_policy, false)
>> static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
>> {
>> struct cppc_cpudata *cpu_data;
>> @@ -889,7 +961,7 @@ static ssize_t store_auto_select(struct cpufreq_policy *policy,
>> if (ret)
>> return ret;
> Since you already store the auto_sel value in perf_ctrls, we can compare the
> new value with perf_ctrls.auto_sel here, and just return if they are the
> same.
Will add in v6.
Thank you,
Sumit Gupta
....
Powered by blists - more mailing lists