Message-ID: <bb4b0e4137b62651b9d028925fa8f09ca5fbd989.1570819652.git.Janakarajan.Natarajan@amd.com>
Date:   Fri, 11 Oct 2019 19:37:55 +0000
From:   "Natarajan, Janakarajan" <Janakarajan.Natarajan@....com>
To:     "linux-pm@...r.kernel.org" <linux-pm@...r.kernel.org>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
CC:     Thomas Renninger <trenn@...e.com>, Shuah Khan <shuah@...nel.org>,
        Pu Wen <puwen@...on.com>, Borislav Petkov <bp@...e.de>,
        Allison Randal <allison@...utok.net>,
        Thomas Gleixner <tglx@...utronix.de>,
        Kate Stewart <kstewart@...uxfoundation.org>,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        Richard Fontana <rfontana@...hat.com>,
        "Natarajan, Janakarajan" <Janakarajan.Natarajan@....com>
Subject: [PATCHv2 2/3] cpupower: mperf_monitor: Introduce per_cpu_schedule
 flag

The per_cpu_schedule flag is used to move the cpupower process to the cpu
whose APERF/MPERF registers we want to read.

This prevents the read_msr() calls from generating IPIs, since we are already
running on the cpu of interest.

Ex: If cpupower is running on CPU 0 and we execute

    read_msr(20, MSR_APERF, val) then,
    read_msr(20, MSR_MPERF, val)

    the msr module will send two separate IPIs from CPU 0 to CPU 20, one to
    read MSR_APERF and another to read MSR_MPERF.

The delay between the two reads introduced by the IPI latency can cause the
APERF and MPERF values to go out of sync.
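
For context, the mperf monitor turns the APERF/MPERF deltas into an average
effective frequency, so any skew between the two reads shows up directly in
the reported value. A minimal sketch of that calculation (illustrative names,
not the exact cpupower code):

    /*
     * Illustrative only: average effective frequency over a sampling
     * interval, derived from the APERF/MPERF deltas. If the two MSRs
     * are read at different times (e.g. separated by IPI latency),
     * the ratio is skewed and the reported frequency is wrong.
     */
    static unsigned long long avg_freq_khz(unsigned long long aperf_diff,
                                           unsigned long long mperf_diff,
                                           unsigned long long max_freq_khz)
    {
            if (!mperf_diff)
                    return 0;
            return max_freq_khz * aperf_diff / mperf_diff;
    }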

Using the per_cpu_schedule flag reduces the probability of this happening. It
comes at the cost of a negligible increase in cpu consumption caused by
migrating cpupower across the cpus of the system.
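
The bind_cpu() call used in the diff below is what performs this migration.
A helper like it can be built on sched_setaffinity(2); a minimal,
illustrative sketch (assumed implementation, not the exact cpupower code):

    /*
     * Illustrative sketch of a bind_cpu()-style helper built on
     * sched_setaffinity(2); not the exact cpupower implementation.
     */
    #define _GNU_SOURCE
    #include <sched.h>

    static int bind_cpu_sketch(int cpu)
    {
            cpu_set_t set;

            CPU_ZERO(&set);
            CPU_SET(cpu, &set);
            /* pid 0 means the calling process; returns 0 on success */
            return sched_setaffinity(0, sizeof(set), &set);
    }

Pinning the reader to the target cpu trades one scheduler migration per
sampled cpu for the removal of two cross-cpu IPIs per sample, which is why
the added cpu consumption stays negligible.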

Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@....com>
---
 .../utils/idle_monitor/cpupower-monitor.h     |  1 +
 .../utils/idle_monitor/mperf_monitor.c        | 42 ++++++++++++++-----
 2 files changed, 33 insertions(+), 10 deletions(-)

diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
index 9b612d999660..5b5eb1da0cce 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
@@ -62,6 +62,7 @@ struct cpuidle_monitor {
 	unsigned int overflow_s;
 	struct {
 		unsigned int needs_root:1;
+		unsigned int per_cpu_schedule:1;
 	} flags;
 };
 
diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
index 7cae74202a4d..afb2e6f8edd3 100644
--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -86,15 +86,35 @@ static int mperf_get_tsc(unsigned long long *tsc)
 	return ret;
 }
 
+static int get_aperf_mperf(int cpu, unsigned long long *aval,
+				    unsigned long long *mval)
+{
+	int ret;
+
+	/*
+	 * Running on the cpu from which we read the registers will
+	 * prevent APERF/MPERF from going out of sync because of IPI
+	 * latency introduced by read_msr()s.
+	 */
+	if (mperf_monitor.flags.per_cpu_schedule) {
+		if (bind_cpu(cpu))
+			return 1;
+	}
+
+	ret  = read_msr(cpu, MSR_APERF, aval);
+	ret |= read_msr(cpu, MSR_MPERF, mval);
+
+	return ret;
+}
+
 static int mperf_init_stats(unsigned int cpu)
 {
-	unsigned long long val;
+	unsigned long long aval, mval;
 	int ret;
 
-	ret = read_msr(cpu, MSR_APERF, &val);
-	aperf_previous_count[cpu] = val;
-	ret |= read_msr(cpu, MSR_MPERF, &val);
-	mperf_previous_count[cpu] = val;
+	ret = get_aperf_mperf(cpu, &aval, &mval);
+	aperf_previous_count[cpu] = aval;
+	mperf_previous_count[cpu] = mval;
 	is_valid[cpu] = !ret;
 
 	return 0;
@@ -102,13 +122,12 @@ static int mperf_init_stats(unsigned int cpu)
 
 static int mperf_measure_stats(unsigned int cpu)
 {
-	unsigned long long val;
+	unsigned long long aval, mval;
 	int ret;
 
-	ret = read_msr(cpu, MSR_APERF, &val);
-	aperf_current_count[cpu] = val;
-	ret |= read_msr(cpu, MSR_MPERF, &val);
-	mperf_current_count[cpu] = val;
+	ret = get_aperf_mperf(cpu, &aval, &mval);
+	aperf_current_count[cpu] = aval;
+	mperf_current_count[cpu] = mval;
 	is_valid[cpu] = !ret;
 
 	return 0;
@@ -305,6 +324,9 @@ struct cpuidle_monitor *mperf_register(void)
 	if (init_maxfreq_mode())
 		return NULL;
 
+	if (cpupower_cpu_info.vendor == X86_VENDOR_AMD)
+		mperf_monitor.flags.per_cpu_schedule = 1;
+
 	/* Free this at program termination */
 	is_valid = calloc(cpu_count, sizeof(int));
 	mperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
-- 
2.17.1
