[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <ZTjz2Ox_iqorbejw@FVFF77S0Q05N>
Date: Wed, 25 Oct 2023 11:54:16 +0100
From: Mark Rutland <mark.rutland@....com>
To: Zeng Heng <zengheng4@...wei.com>
Cc: broonie@...nel.org, joey.gouly@....com, will@...nel.org,
amit.kachhap@....com, rafael@...nel.org, catalin.marinas@....com,
james.morse@....com, maz@...nel.org, viresh.kumar@...aro.org,
sumitg@...dia.com, yang@...amperecomputing.com,
linux-kernel@...r.kernel.org, linux-pm@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, wangxiongfeng2@...wei.com,
xiexiuqi@...wei.com, Ionela Voinescu <ionela.voinescu@....com>
Subject: Re: [PATCH 2/3] cpufreq: CPPC: Keep the target core awake when
reading its cpufreq rate
[adding Ionela]
On Wed, Oct 25, 2023 at 05:38:46PM +0800, Zeng Heng wrote:
> As the ARM AMU documentation says, all counters are subject to any
> changes in clock frequency, including clock stopping caused by the WFI
> and WFE instructions.
>
> Therefore, use smp_call_on_cpu() to trigger the target CPU to
> read its own AMU counters, which ensures the counters are working
> properly while the cstate feature is enabled.
IIUC there's a pretty deliberate split with all the actual reading of the AMU
living in arch/arm64/kernel/topology.c, and the driver code being (relatively)
generic.
We already have code in arch/arm64/kernel/topology.c to read counters on a
specific CPU; why can't we reuse that (and avoid exporting cpu_has_amu_feat())?
Mark.
>
> Reported-by: Sumit Gupta <sumitg@...dia.com>
> Link: https://lore.kernel.org/all/20230418113459.12860-7-sumitg@nvidia.com/
> Signed-off-by: Zeng Heng <zengheng4@...wei.com>
> ---
> drivers/cpufreq/cppc_cpufreq.c | 39 ++++++++++++++++++++++++++--------
> 1 file changed, 30 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
> index fe08ca419b3d..321a9dc9484d 100644
> --- a/drivers/cpufreq/cppc_cpufreq.c
> +++ b/drivers/cpufreq/cppc_cpufreq.c
> @@ -90,6 +90,12 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
> struct cppc_perf_fb_ctrs *fb_ctrs_t0,
> struct cppc_perf_fb_ctrs *fb_ctrs_t1);
>
> +struct fb_ctr_pair {
> + u32 cpu;
> + struct cppc_perf_fb_ctrs fb_ctrs_t0;
> + struct cppc_perf_fb_ctrs fb_ctrs_t1;
> +};
> +
> /**
> * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
> * @work: The work item.
> @@ -840,9 +846,24 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
> return (reference_perf * delta_delivered) / delta_reference;
> }
>
> +static int cppc_get_perf_ctrs_pair(void *val)
> +{
> + struct fb_ctr_pair *fb_ctrs = val;
> + int cpu = fb_ctrs->cpu;
> + int ret;
> +
> + ret = cppc_get_perf_ctrs(cpu, &fb_ctrs->fb_ctrs_t0);
> + if (ret)
> + return ret;
> +
> + udelay(2); /* 2usec delay between sampling */
> +
> + return cppc_get_perf_ctrs(cpu, &fb_ctrs->fb_ctrs_t1);
> +}
> +
> static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
> {
> - struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
> + struct fb_ctr_pair fb_ctrs = { .cpu = cpu, };
> struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
> struct cppc_cpudata *cpu_data = policy->driver_data;
> u64 delivered_perf;
> @@ -850,18 +871,18 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
>
> cpufreq_cpu_put(policy);
>
> - ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
> - if (ret)
> - return 0;
> -
> - udelay(2); /* 2usec delay between sampling */
> + if (cpu_has_amu_feat(cpu))
> + ret = smp_call_on_cpu(cpu, cppc_get_perf_ctrs_pair,
> + &fb_ctrs, false);
> + else
> + ret = cppc_get_perf_ctrs_pair(&fb_ctrs);
>
> - ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
> if (ret)
> return 0;
>
> - delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
> - &fb_ctrs_t1);
> + delivered_perf = cppc_perf_from_fbctrs(cpu_data,
> + &fb_ctrs.fb_ctrs_t0,
> + &fb_ctrs.fb_ctrs_t1);
>
> return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
> }
> --
> 2.25.1
>
Powered by blists - more mailing lists