Message-ID: <28a6e60c-4492-105b-5fcf-3129ca868349@nvidia.com>
Date: Wed, 25 Oct 2023 20:27:23 +0530
From: Sumit Gupta <sumitg@...dia.com>
To: Zeng Heng <zengheng4@...wei.com>,
Beata Michalska <beata.michalska@....com>
CC: <broonie@...nel.org>, <joey.gouly@....com>, <will@...nel.org>,
<amit.kachhap@....com>, <rafael@...nel.org>,
<catalin.marinas@....com>, <james.morse@....com>, <maz@...nel.org>,
<viresh.kumar@...aro.org>, <yang@...amperecomputing.com>,
<linux-kernel@...r.kernel.org>, <linux-pm@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>,
<wangxiongfeng2@...wei.com>, <xiexiuqi@...wei.com>,
Ionela Voinescu <ionela.voinescu@....com>,
linux-tegra <linux-tegra@...r.kernel.org>,
"Mark Rutland" <mark.rutland@....com>,
Sumit Gupta <sumitg@...dia.com>
Subject: Re: [PATCH 2/3] cpufreq: CPPC: Keep the target core awake when
reading its cpufreq rate
> [adding Ionela]
>
> On Wed, Oct 25, 2023 at 05:38:46PM +0800, Zeng Heng wrote:
>> As the Arm AMU documentation says, all counters are subject to any
>> changes in clock frequency, including clock stopping caused by the WFI
>> and WFE instructions.
>>
>> Therefore, use smp_call_on_cpu() to make the target CPU read its own
>> AMU counters, which ensures the counters are working properly while
>> C-states are enabled.
>
> IIUC there's a pretty deliberate split with all the actual reading of the AMU
> living in arch/arm64/kernel/topology.c, and the driver code being (relatively)
> generic.
>
> We already have code in arch/arm64/kernel/topology.c to read counters on a
> specific CPU; why can't we reuse that (and avoid exporting cpu_has_amu_feat())?
This patch seems to be mostly based on my previous patch [1], which has
already been discussed in [2]. Beata [CC'ed] shared an alternate
approach [3] that leverages the existing code in 'topology.c' to get
the average frequency over the last tick period; a rough sketch of that
idea is included after the links below.

Beata,
Could you share v2 of [3] with a request to merge? We can try to solve
the CPU idle case later, on top of that.

Also, please include the fix from [4] if it looks fine.
Best Regards,
Sumit Gupta
[1] https://lore.kernel.org/all/20230418113459.12860-7-sumitg@nvidia.com/
[2] https://lore.kernel.org/lkml/cde1d8a9-3a21-e82b-7895-40603a14d898@nvidia.com/T/#m2174305de4706006e0bd9c103a0e5ff61cea7a12
[3] https://lore.kernel.org/lkml/20230606155754.245998-1-beata.michalska@arm.com/
[4] https://lore.kernel.org/lkml/6a5710f6-bfbb-5dfd-11cd-0cd02220cee7@nvidia.com/
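
For illustration only, below is a rough sketch (not the actual patch in
[3]) of how that approach would plug into cppc_cpufreq_get_rate():
prefer the tick-based average frequency maintained by
arch/arm64/kernel/topology.c, exposed through arch_freq_get_on_cpu()
(assuming [3] wires that hook up for arm64), and fall back to sampling
the CPPC feedback counters only when it is unavailable:

/*
 * Rough sketch only: prefer the tick-based average frequency from the
 * arch code and fall back to the existing feedback-counter sampling.
 * arch_freq_get_on_cpu() returning a useful value on arm64 depends on
 * the series in [3].
 */
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
        struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        struct cppc_cpudata *cpu_data = policy->driver_data;
        u64 delivered_perf;
        unsigned int rate;
        int ret;

        cpufreq_cpu_put(policy);

        /* Average frequency over the last tick period, from the AMU counters. */
        rate = arch_freq_get_on_cpu(cpu);
        if (rate)
                return rate;

        /* Fallback: sample the CPPC feedback counters as before. */
        ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
        if (ret)
                return 0;

        udelay(2); /* 2usec delay between sampling */

        ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
        if (ret)
                return 0;

        delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
                                               &fb_ctrs_t1);

        return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}

With that, the get_rate() path does not need to wake the target CPU or
cross-call into it at read time, since the AMU counters are sampled by
the CPU itself on each tick.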
>>
>> Reported-by: Sumit Gupta <sumitg@...dia.com>
>> Link: https://lore.kernel.org/all/20230418113459.12860-7-sumitg@nvidia.com/
>> Signed-off-by: Zeng Heng <zengheng4@...wei.com>
>> ---
>> drivers/cpufreq/cppc_cpufreq.c | 39 ++++++++++++++++++++++++++--------
>> 1 file changed, 30 insertions(+), 9 deletions(-)
>>
>> diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
>> index fe08ca419b3d..321a9dc9484d 100644
>> --- a/drivers/cpufreq/cppc_cpufreq.c
>> +++ b/drivers/cpufreq/cppc_cpufreq.c
>> @@ -90,6 +90,12 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
>> struct cppc_perf_fb_ctrs *fb_ctrs_t0,
>> struct cppc_perf_fb_ctrs *fb_ctrs_t1);
>>
>> +struct fb_ctr_pair {
>> + u32 cpu;
>> + struct cppc_perf_fb_ctrs fb_ctrs_t0;
>> + struct cppc_perf_fb_ctrs fb_ctrs_t1;
>> +};
>> +
>> /**
>> * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
>> * @work: The work item.
>> @@ -840,9 +846,24 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
>> return (reference_perf * delta_delivered) / delta_reference;
>> }
>>
>> +static int cppc_get_perf_ctrs_pair(void *val)
>> +{
>> + struct fb_ctr_pair *fb_ctrs = val;
>> + int cpu = fb_ctrs->cpu;
>> + int ret;
>> +
>> + ret = cppc_get_perf_ctrs(cpu, &fb_ctrs->fb_ctrs_t0);
>> + if (ret)
>> + return ret;
>> +
>> + udelay(2); /* 2usec delay between sampling */
>> +
>> + return cppc_get_perf_ctrs(cpu, &fb_ctrs->fb_ctrs_t1);
>> +}
>> +
>> static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
>> {
>> - struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
>> + struct fb_ctr_pair fb_ctrs = { .cpu = cpu, };
>> struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
>> struct cppc_cpudata *cpu_data = policy->driver_data;
>> u64 delivered_perf;
>> @@ -850,18 +871,18 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
>>
>> cpufreq_cpu_put(policy);
>>
>> - ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
>> - if (ret)
>> - return 0;
>> -
>> - udelay(2); /* 2usec delay between sampling */
>> + if (cpu_has_amu_feat(cpu))
>> + ret = smp_call_on_cpu(cpu, cppc_get_perf_ctrs_pair,
>> + &fb_ctrs, false);
>> + else
>> + ret = cppc_get_perf_ctrs_pair(&fb_ctrs);
>>
>> - ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
>> if (ret)
>> return 0;
>>
>> - delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
>> - &fb_ctrs_t1);
>> + delivered_perf = cppc_perf_from_fbctrs(cpu_data,
>> + &fb_ctrs.fb_ctrs_t0,
>> + &fb_ctrs.fb_ctrs_t1);
>>
>> return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
>> }
>> --
>> 2.25.1
>>