Date:   Mon, 25 Sep 2017 17:02:04 -0700
From:   Rohit Jain <rohit.k.jain@...cle.com>
To:     linux-kernel@...r.kernel.org, eas-dev@...ts.linaro.org
Cc:     peterz@...radead.org, mingo@...hat.com, joelaf@...gle.com,
        atish.patra@...cle.com, vincent.guittot@...aro.org,
        dietmar.eggemann@....com, morten.rasmussen@....com
Subject: [PATCH 1/3] sched/fair: Introduce scaled capacity awareness in find_idlest_cpu code path

While looking for idle CPUs for a waking task, we should also account
for the delays caused by the bandwidth reduction which RT/IRQ activity
imposes on a CPU.

This patch does that by preferring a full-capacity idle CPU with
minimum wake-up latency, while keeping the highest-capacity reduced
CPU as a backup choice.
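
The 80% cutoff used by the new full_capacity() helper comes from fixed
point arithmetic: 819/1024 ~= 0.7998, computed with a multiply and a
shift to avoid a division.  A minimal standalone sketch of that check
(illustrative only, not part of the patch; the capacity values are
made up, with capacity_orig at SCHED_CAPACITY_SCALE == 1024):

	#include <stdio.h>
	#include <stdbool.h>

	static bool full_capacity_check(unsigned long cap,
					unsigned long cap_orig)
	{
		/* cap_orig * 819 >> 10 == cap_orig * 819 / 1024 ~= 80% */
		return cap >= (cap_orig * 819 >> 10);
	}

	int main(void)
	{
		/* with cap_orig == 1024 the threshold is exactly 819 */
		printf("%d\n", full_capacity_check(900, 1024)); /* 1 */
		printf("%d\n", full_capacity_check(700, 1024)); /* 0 */
		return 0;
	}
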
Signed-off-by: Rohit Jain <rohit.k.jain@...cle.com>
---
 kernel/sched/fair.c | 27 ++++++++++++++++++++++++---
 1 file changed, 24 insertions(+), 3 deletions(-)
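
Note for reviewers (not part of the changelog): the selection strategy
added by the diff below can be summarized by this simplified,
userspace-style sketch.  The arrays are hypothetical stand-ins for the
per-rq capacity and cpuidle exit-latency state used in the real code,
and the idle_stamp tie-break is omitted for brevity:

	#include <limits.h>

	int pick_idle_cpu(const unsigned long *cpu_capacity,
			  const unsigned long *cpu_capacity_orig,
			  const unsigned int *exit_latency, int nr_cpus)
	{
		unsigned int min_exit_latency = UINT_MAX;
		unsigned long backup_cap = 0;
		int best = -1, backup = -1;
		int i;

		for (i = 0; i < nr_cpus; i++) {
			if (exit_latency[i] > min_exit_latency)
				continue;
			min_exit_latency = exit_latency[i];
			if (cpu_capacity[i] >=
			    (cpu_capacity_orig[i] * 819 >> 10)) {
				/* shallowest idle CPU at full capacity */
				best = i;
			} else if (cpu_capacity[i] > backup_cap) {
				/* remember the best capacity-reduced CPU */
				backup = i;
				backup_cap = cpu_capacity[i];
			}
		}

		/* fall back to the reduced CPU only if no full one exists */
		return best != -1 ? best : backup;
	}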

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index eca6a57..afb701f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5590,6 +5590,11 @@ static unsigned long capacity_orig_of(int cpu)
 	return cpu_rq(cpu)->cpu_capacity_orig;
 }
 
+static inline bool full_capacity(int cpu)
+{
+	return capacity_of(cpu) >= (capacity_orig_of(cpu) * 819 >> 10);
+}
+
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -5916,8 +5921,10 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 	unsigned long load, min_load = ULONG_MAX;
 	unsigned int min_exit_latency = UINT_MAX;
 	u64 latest_idle_timestamp = 0;
+	unsigned int backup_cap = 0;
 	int least_loaded_cpu = this_cpu;
 	int shallowest_idle_cpu = -1;
+	int shallowest_idle_cpu_backup = -1;
 	int i;
 
 	/* Check if we have any choice: */
@@ -5937,7 +5944,12 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 				 */
 				min_exit_latency = idle->exit_latency;
 				latest_idle_timestamp = rq->idle_stamp;
-				shallowest_idle_cpu = i;
+				if (full_capacity(i)) {
+					shallowest_idle_cpu = i;
+				} else if (capacity_of(i) > backup_cap) {
+					shallowest_idle_cpu_backup = i;
+					backup_cap = capacity_of(i);
+				}
 			} else if ((!idle || idle->exit_latency == min_exit_latency) &&
 				   rq->idle_stamp > latest_idle_timestamp) {
 				/*
@@ -5946,7 +5958,12 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 				 * a warmer cache.
 				 */
 				latest_idle_timestamp = rq->idle_stamp;
-				shallowest_idle_cpu = i;
+				if (full_capacity(i)) {
+					shallowest_idle_cpu = i;
+				} else if (capacity_of(i) > backup_cap) {
+					shallowest_idle_cpu_backup = i;
+					backup_cap = capacity_of(i);
+				}
 			}
 		} else if (shallowest_idle_cpu == -1) {
 			load = weighted_cpuload(cpu_rq(i));
@@ -5957,7 +5974,11 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 		}
 	}
 
-	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
+	if (shallowest_idle_cpu != -1)
+		return shallowest_idle_cpu;
+
+	return (shallowest_idle_cpu_backup != -1 ?
+		shallowest_idle_cpu_backup : least_loaded_cpu);
 }
 
 #ifdef CONFIG_SCHED_SMT
-- 
2.7.4
