lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <20170708101700.4678-1-joelaf@google.com>
Date:   Sat,  8 Jul 2017 03:17:00 -0700
From:   Joel Fernandes <joelaf@...gle.com>
To:     linux-kernel@...r.kernel.org
Cc:     patrick.bellasi@....com, juri.lelli@....com,
        andresoportus@...gle.com, dietmar.eggemann@....com,
        Joel Fernandes <joelaf@...gle.com>,
        Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>,
        Len Brown <lenb@...nel.org>,
        "Rafael J . Wysocki" <rjw@...ysocki.net>,
        Viresh Kumar <viresh.kumar@...aro.org>,
        Ingo Molnar <mingo@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>
Subject: [PATCH RFC v3] cpufreq: schedutil: Make iowait boost more energy efficient

Currently the iowait_boost feature in schedutil makes the frequency go to max.
This is to handle a case that Peter described where the throughput of
operations involving continuous I/O requests suffers [1].

Instead of going to max, it's also possible to achieve the same effect by
ramping up to max if there are repeated IO wait wake ups happening. This patch
is an attempt to do that. We start from the minimum frequency and double the
boost for every consecutive iowait update until we reach the maximum frequency.

I managed to find an Intel machine to test this patch and it is achieving the
desired effect. Also tested on an ARM platform and saw that the transient
iowait requests aren't causing frequency spikes.

[1] https://patchwork.kernel.org/patch/9735885/

Cc: Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>
Cc: Len Brown <lenb@...nel.org>
Cc: Rafael J. Wysocki <rjw@...ysocki.net>
Cc: Viresh Kumar <viresh.kumar@...aro.org>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Signed-off-by: Joel Fernandes <joelaf@...gle.com>
---
 kernel/sched/cpufreq_schedutil.c | 47 +++++++++++++++++++++++++++++++++++-----
 1 file changed, 42 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 622eed1b7658..4a2d424d0c58 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -53,7 +53,9 @@ struct sugov_cpu {
 	struct update_util_data update_util;
 	struct sugov_policy *sg_policy;
 
+	bool prev_iowait_boost;
 	unsigned long iowait_boost;
+	unsigned long iowait_boost_min;
 	unsigned long iowait_boost_max;
 	u64 last_update;
 
@@ -168,22 +170,47 @@ static void sugov_get_util(unsigned long *util, unsigned long *max)
 	*max = cfs_max;
 }
 
+static void sugov_decay_iowait_boost(struct sugov_cpu *sg_cpu)
+{
+	sg_cpu->iowait_boost >>= 1;
+
+	if (sg_cpu->iowait_boost < sg_cpu->iowait_boost_min)
+		sg_cpu->iowait_boost = 0;
+}
+
 static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
 				   unsigned int flags)
 {
 	if (flags & SCHED_CPUFREQ_IOWAIT) {
-		sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+		/* Remember for next time that we did an iowait boost */
+		sg_cpu->prev_iowait_boost = true;
+		if (sg_cpu->iowait_boost) {
+			sg_cpu->iowait_boost <<= 1;
+			sg_cpu->iowait_boost = min(sg_cpu->iowait_boost,
+						   sg_cpu->iowait_boost_max);
+		} else {
+			sg_cpu->iowait_boost = sg_cpu->iowait_boost_min;
+		}
 	} else if (sg_cpu->iowait_boost) {
 		s64 delta_ns = time - sg_cpu->last_update;
 
 		/* Clear iowait_boost if the CPU apprears to have been idle. */
 		if (delta_ns > TICK_NSEC)
 			sg_cpu->iowait_boost = 0;
+
+		/*
+		 * Since we don't decay iowait_boost when it's consumed during
+		 * the previous SCHED_CPUFREQ_IOWAIT update, decay it now.
+		 */
+		if (sg_cpu->prev_iowait_boost) {
+			sugov_decay_iowait_boost(sg_cpu);
+			sg_cpu->prev_iowait_boost = false;
+		}
 	}
 }
 
 static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
-			       unsigned long *max)
+			       unsigned long *max, unsigned int flags)
 {
 	unsigned long boost_util = sg_cpu->iowait_boost;
 	unsigned long boost_max = sg_cpu->iowait_boost_max;
@@ -195,7 +222,16 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
 		*util = boost_util;
 		*max = boost_max;
 	}
-	sg_cpu->iowait_boost >>= 1;
+
+	/*
+	 * In case iowait boost just happened on this CPU, don't reduce it right
+	 * away since then the iowait boost will never increase on subsequent
+	 * in_iowait wakeups.
+	 */
+	if (flags & SCHED_CPUFREQ_IOWAIT && this_cpu_ptr(&sugov_cpu) == sg_cpu)
+		return;
+
+	sugov_decay_iowait_boost(sg_cpu);
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -233,7 +269,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 		next_f = policy->cpuinfo.max_freq;
 	} else {
 		sugov_get_util(&util, &max);
-		sugov_iowait_boost(sg_cpu, &util, &max);
+		sugov_iowait_boost(sg_cpu, &util, &max, flags);
 		next_f = get_next_freq(sg_policy, util, max);
 		/*
 		 * Do not reduce the frequency if the CPU has not been idle
@@ -279,7 +315,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 			max = j_max;
 		}
 
-		sugov_iowait_boost(j_sg_cpu, &util, &max);
+		sugov_iowait_boost(j_sg_cpu, &util, &max, flags);
 	}
 
 	return get_next_freq(sg_policy, util, max);
@@ -612,6 +648,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 		memset(sg_cpu, 0, sizeof(*sg_cpu));
 		sg_cpu->sg_policy = sg_policy;
 		sg_cpu->flags = SCHED_CPUFREQ_RT;
+		sg_cpu->iowait_boost_min = policy->cpuinfo.min_freq;
 		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
 		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
 					     policy_is_shared(policy) ?
-- 
2.13.2.725.g09c95d1e9-goog

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ