Message-ID: <CAJZ5v0hLn9APoLL13eEjTDKZNr0o5JnvTZrit793R2dAtvj79w@mail.gmail.com>
Date: Tue, 19 Apr 2022 18:15:56 +0200
From: "Rafael J. Wysocki" <rafael@...nel.org>
To: Thomas Gleixner <tglx@...utronix.de>
Cc: LKML <linux-kernel@...r.kernel.org>,
"the arch/x86 maintainers" <x86@...nel.org>,
"Rafael J. Wysocki" <rafael@...nel.org>,
Linux PM <linux-pm@...r.kernel.org>,
Eric Dumazet <edumazet@...gle.com>,
"Paul E. McKenney" <paulmck@...nel.org>
Subject: Re: [patch 05/10] x86/aperfmperf: Put frequency invariance aperf/mperf data into a struct

On Fri, Apr 15, 2022 at 9:19 PM Thomas Gleixner <tglx@...utronix.de> wrote:
>
> Preparation for sharing code with the CPU frequency portion of the
> aperf/mperf code.
>
> No functional change.
>
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
All good AFAICS:
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
> ---
> arch/x86/kernel/cpu/aperfmperf.c | 26 +++++++++++++++-----------
> 1 file changed, 15 insertions(+), 11 deletions(-)
>
> --- a/arch/x86/kernel/cpu/aperfmperf.c
> +++ b/arch/x86/kernel/cpu/aperfmperf.c
> @@ -22,6 +22,13 @@
>
> #include "cpu.h"
>
> +struct aperfmperf {
> + u64 aperf;
> + u64 mperf;
> +};
> +
> +static DEFINE_PER_CPU_SHARED_ALIGNED(struct aperfmperf, cpu_samples);
> +
> struct aperfmperf_sample {
> unsigned int khz;
> atomic_t scfpending;
> @@ -194,8 +201,6 @@ unsigned int arch_freq_get_on_cpu(int cp
>
> DEFINE_STATIC_KEY_FALSE(arch_scale_freq_key);
>
> -static DEFINE_PER_CPU(u64, arch_prev_aperf);
> -static DEFINE_PER_CPU(u64, arch_prev_mperf);
> static u64 arch_turbo_freq_ratio = SCHED_CAPACITY_SCALE;
> static u64 arch_max_freq_ratio = SCHED_CAPACITY_SCALE;
>
> @@ -407,8 +412,8 @@ static void init_counter_refs(void)
> rdmsrl(MSR_IA32_APERF, aperf);
> rdmsrl(MSR_IA32_MPERF, mperf);
>
> - this_cpu_write(arch_prev_aperf, aperf);
> - this_cpu_write(arch_prev_mperf, mperf);
> + this_cpu_write(cpu_samples.aperf, aperf);
> + this_cpu_write(cpu_samples.mperf, mperf);
> }
>
> #ifdef CONFIG_PM_SLEEP
> @@ -474,9 +479,8 @@ DEFINE_PER_CPU(unsigned long, arch_freq_
>
> void arch_scale_freq_tick(void)
> {
> - u64 freq_scale;
> - u64 aperf, mperf;
> - u64 acnt, mcnt;
> + struct aperfmperf *s = this_cpu_ptr(&cpu_samples);
> + u64 aperf, mperf, acnt, mcnt, freq_scale;
>
> if (!arch_scale_freq_invariant())
> return;
> @@ -484,11 +488,11 @@ void arch_scale_freq_tick(void)
> rdmsrl(MSR_IA32_APERF, aperf);
> rdmsrl(MSR_IA32_MPERF, mperf);
>
> - acnt = aperf - this_cpu_read(arch_prev_aperf);
> - mcnt = mperf - this_cpu_read(arch_prev_mperf);
> + acnt = aperf - s->aperf;
> + mcnt = mperf - s->mperf;
>
> - this_cpu_write(arch_prev_aperf, aperf);
> - this_cpu_write(arch_prev_mperf, mperf);
> + s->aperf = aperf;
> + s->mperf = mperf;
>
> if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
> goto error;
>
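
[Not part of the patch; for illustration only.] The change above keeps both
previous counter readings in one cache-line-aligned per-CPU struct instead of
two separate per-CPU variables, so the tick path updates a single line and the
sampling code can later be shared. A minimal userspace sketch of that pattern,
with read_aperf()/read_mperf() as hypothetical stand-ins for the
rdmsrl(MSR_IA32_APERF/MPERF) reads and a single static struct standing in for
the per-CPU instance:

#include <stdint.h>
#include <stdio.h>

struct aperfmperf {
	uint64_t aperf;
	uint64_t mperf;
};

/* one instance per CPU in the kernel; a single static copy here */
static struct aperfmperf __attribute__((aligned(64))) cpu_sample;

/* fake monotonically increasing counters standing in for the MSRs */
static uint64_t read_aperf(void) { static uint64_t c; return c += 900; }
static uint64_t read_mperf(void) { static uint64_t c; return c += 1000; }

static void scale_freq_tick(void)
{
	struct aperfmperf *s = &cpu_sample;
	uint64_t aperf = read_aperf();
	uint64_t mperf = read_mperf();
	uint64_t acnt = aperf - s->aperf;	/* delta since previous tick */
	uint64_t mcnt = mperf - s->mperf;

	s->aperf = aperf;			/* both updates touch one struct */
	s->mperf = mperf;

	if (mcnt)
		printf("aperf/mperf ratio this tick: %.2f\n", (double)acnt / mcnt);
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		scale_freq_tick();
	return 0;
}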