[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <9f94bb60-4be2-4303-54de-f50bdd7cb3e6@amd.com>
Date: Fri, 27 Sep 2019 16:07:56 +0000
From: "Natarajan, Janakarajan" <Janakarajan.Natarajan@....com>
To: "linux-pm@...r.kernel.org" <linux-pm@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
CC: Thomas Renninger <trenn@...e.com>, Shuah Khan <shuah@...nel.org>,
Pu Wen <puwen@...on.com>, Borislav Petkov <bp@...e.de>,
Allison Randal <allison@...utok.net>,
Thomas Gleixner <tglx@...utronix.de>,
Kate Stewart <kstewart@...uxfoundation.org>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Richard Fontana <rfontana@...hat.com>
Subject: Re: [PATCH 1/2] Modify cpupower to schedule itself on cores it is
reading MSRs from
On 9/18/2019 11:34 AM, Natarajan, Janakarajan wrote:
> Modify cpupower to schedule itself on each of the cpus in the system and
> then get the APERF/MPERF register values.
>
> This is advantageous because an IPI is not generated when a read_msr() is
> executed on the local logical CPU, thereby reducing the chance that the
> APERF and MPERF values are read out of sync with each other.
>
> Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@....com>
Any concerns regarding this patchset?
Thanks.
> ---
> .../utils/idle_monitor/mperf_monitor.c | 38 ++++++++++++++-----
> 1 file changed, 28 insertions(+), 10 deletions(-)
>
> diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
> index 44806a6dae11..8b072e39c897 100644
> --- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
> +++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
> @@ -10,6 +10,7 @@
> #include <stdlib.h>
> #include <string.h>
> #include <limits.h>
> +#include <sched.h>
>
> #include <cpufreq.h>
>
> @@ -86,15 +87,33 @@ static int mperf_get_tsc(unsigned long long *tsc)
> return ret;
> }
>
> +static int get_aperf_mperf(int cpu, unsigned long long *aval,
> + unsigned long long *mval)
> +{
> + cpu_set_t set;
> + int ret;
> +
> + CPU_ZERO(&set);
> + CPU_SET(cpu, &set);
> + if (sched_setaffinity(getpid(), sizeof(set), &set) == -1) {
> + dprint("Could not migrate to cpu: %d\n", cpu);
> + return 1;
> + }
> +
> + ret = read_msr(cpu, MSR_APERF, aval);
> + ret |= read_msr(cpu, MSR_MPERF, mval);
> +
> + return ret;
> +}
> +
> static int mperf_init_stats(unsigned int cpu)
> {
> - unsigned long long val;
> + unsigned long long aval, mval;
> int ret;
>
> - ret = read_msr(cpu, MSR_APERF, &val);
> - aperf_previous_count[cpu] = val;
> - ret |= read_msr(cpu, MSR_MPERF, &val);
> - mperf_previous_count[cpu] = val;
> + ret = get_aperf_mperf(cpu, &aval, &mval);
> + aperf_previous_count[cpu] = aval;
> + mperf_previous_count[cpu] = mval;
> is_valid[cpu] = !ret;
>
> return 0;
> @@ -102,13 +121,12 @@ static int mperf_init_stats(unsigned int cpu)
>
> static int mperf_measure_stats(unsigned int cpu)
> {
> - unsigned long long val;
> + unsigned long long aval, mval;
> int ret;
>
> - ret = read_msr(cpu, MSR_APERF, &val);
> - aperf_current_count[cpu] = val;
> - ret |= read_msr(cpu, MSR_MPERF, &val);
> - mperf_current_count[cpu] = val;
> + ret = get_aperf_mperf(cpu, &aval, &mval);
> + aperf_current_count[cpu] = aval;
> + mperf_current_count[cpu] = mval;
> is_valid[cpu] = !ret;
>
> return 0;
Powered by blists - more mailing lists