Message-Id: <20210513074027.543926-4-srikar@linux.vnet.ibm.com>
Date:   Thu, 13 May 2021 13:10:22 +0530
From:   Srikar Dronamraju <srikar@...ux.vnet.ibm.com>
To:     Ingo Molnar <mingo@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>
Cc:     LKML <linux-kernel@...r.kernel.org>,
        Mel Gorman <mgorman@...hsingularity.net>,
        Rik van Riel <riel@...riel.com>,
        Srikar Dronamraju <srikar@...ux.vnet.ibm.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Valentin Schneider <valentin.schneider@....com>,
        Vincent Guittot <vincent.guittot@...aro.org>,
        Dietmar Eggemann <dietmar.eggemann@....com>,
        Gautham R Shenoy <ego@...ux.vnet.ibm.com>,
        Parth Shah <parth@...ux.ibm.com>,
        Aubrey Li <aubrey.li@...ux.intel.com>
Subject: [PATCH v3 3/8] sched/fair: Update idle-core more often

Currently, when the scheduler pulls a task during load balancing, or
when a CPU picks up a task at wakeup without having to call
select_idle_cpu(), it never checks whether the target CPU is part of
the idle-core. This makes the idle-core tracking less accurate.

Given that the identity of the idle-core is maintained per LLC, it is
easy to update the idle-core as soon as the CPU picks up a task.

This change updates the idle-core whenever a CPU from the idle-core
picks up a task. However, if there are multiple idle-cores in the LLC
and the waking CPU happens to be part of the designated idle-core,
idle-core is set to -1 even though other idle-cores may remain. In
cases where the scheduler is sure that there are no more idle-cores,
idle-core is set to -2.
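
To make the encoding concrete, here is an illustrative helper (not
part of this patch; the name llc_may_have_idle_core() is made up, but
the predicate matches the has_idle_core test in select_idle_cpu()
below):

	/*
	 * idle_core == -2: the LLC is known to have no idle core.
	 * idle_core == -1: the tracked idle-core went busy recently,
	 *                  but other idle cores may still exist.
	 * idle_core >=  0: a CPU number whose core is known idle.
	 */
	static inline bool llc_may_have_idle_core(int idle_core)
	{
		return idle_core != -2;
	}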

To reduce the time spent in this -1 state, whenever a CPU updates
idle-core, it searches the other cores in the LLC for an idle core if
the core it belongs to is not idle.
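
As a walk-through of the resulting transitions, consider the
following standalone user-space sketch (not kernel code; the two-core
topology and the simplified helper bodies are assumptions made purely
for illustration):

	#include <stdio.h>
	#include <stdbool.h>

	#define NR_CORES 2

	static bool core_idle[NR_CORES] = { true, true };
	static int idle_core = 0;	/* core 0 tracked as idle */

	/* simplified analogue of set_next_idle_core() */
	static void next_idle_core(int busy)
	{
		int c;

		for (c = 0; c < NR_CORES; c++) {
			if (c != busy && core_idle[c]) {
				idle_core = c;
				return;
			}
		}
		idle_core = -2;		/* no idle core left in the LLC */
	}

	/* simplified analogue of set_core_busy() */
	static void core_busy(int core)
	{
		core_idle[core] = false;
		if (idle_core == core)
			idle_core = -1;	/* stale; others may be idle */
	}

	int main(void)
	{
		core_busy(0);		/* a CPU on core 0 picks a task */
		printf("idle_core = %d\n", idle_core);	/* -1 */
		next_idle_core(0);	/* the next update scans the LLC */
		printf("idle_core = %d\n", idle_core);	/* 1 */
		core_busy(1);
		next_idle_core(1);
		printf("idle_core = %d\n", idle_core);	/* -2 */
		return 0;
	}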

Cc: LKML <linux-kernel@...r.kernel.org>
Cc: Gautham R Shenoy <ego@...ux.vnet.ibm.com>
Cc: Parth Shah <parth@...ux.ibm.com>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Valentin Schneider <valentin.schneider@....com>
Cc: Dietmar Eggemann <dietmar.eggemann@....com>
Cc: Mel Gorman <mgorman@...hsingularity.net>
Cc: Vincent Guittot <vincent.guittot@...aro.org>
Cc: Rik van Riel <riel@...riel.com>
Cc: Aubrey Li <aubrey.li@...ux.intel.com>
Signed-off-by: Srikar Dronamraju <srikar@...ux.vnet.ibm.com>
---
Changelog v2->v3:
 - Search for other idle-cores in the LLC only if idle-core is -1

 kernel/sched/fair.c  | 61 +++++++++++++++++++++++++++++++++++++++-----
 kernel/sched/idle.c  |  6 +++++
 kernel/sched/sched.h |  2 ++
 3 files changed, 63 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c42b2b3cd08f..d002bc95c0bc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6039,6 +6039,13 @@ static inline int __select_idle_cpu(int cpu)
 DEFINE_STATIC_KEY_FALSE(sched_smt_present);
 EXPORT_SYMBOL_GPL(sched_smt_present);
 
+/*
+ * A value of -2 indicates there are no idle-cores in the LLC.
+ * A value of -1 indicates an idle-core turned busy recently;
+ * however, there could be other idle-cores in the system.
+ * Any other value indicates that the core to which the
+ * CPU (the value) belongs is idle.
+ */
 static inline void set_idle_core(int cpu, int val)
 {
 	struct sched_domain_shared *sds;
@@ -6061,6 +6068,44 @@ static inline int get_idle_core(int cpu, int def)
 	return def;
 }
 
+static void set_next_idle_core(int target)
+{
+	struct sched_domain *sd = rcu_dereference(per_cpu(sd_llc, target));
+	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
+	int core, cpu;
+
+	if (!sd)
+		return;
+
+	cpumask_andnot(cpus, sched_domain_span(sd), cpu_smt_mask(target));
+	for_each_cpu_wrap(core, cpus, target) {
+		bool idle = true;
+
+		for_each_cpu(cpu, cpu_smt_mask(core)) {
+			if (!available_idle_cpu(cpu)) {
+				idle = false;
+				break;
+			}
+		}
+
+		if (idle) {
+			set_idle_core(core, per_cpu(smt_id, core));
+			return;
+		}
+
+		cpumask_andnot(cpus, cpus, cpu_smt_mask(core));
+	}
+	set_idle_core(target, -2);
+}
+
+void set_core_busy(int core)
+{
+	rcu_read_lock();
+	if (get_idle_core(core, -1) == per_cpu(smt_id, core))
+		set_idle_core(core, -1);
+	rcu_read_unlock();
+}
+
 /*
  * Scans the local SMT mask to see if the entire core is idle, and records this
  * information in sd_llc_shared->idle_core.
@@ -6071,21 +6116,25 @@ static inline int get_idle_core(int cpu, int def)
 void __update_idle_core(struct rq *rq)
 {
 	int core = cpu_of(rq);
-	int cpu;
+	int cpu, idlecore;
 
 	rcu_read_lock();
-	if (get_idle_core(core, 0) >= 0)
+	idlecore = get_idle_core(core, 0);
+	if (idlecore >= 0)
 		goto unlock;
 
 	for_each_cpu(cpu, cpu_smt_mask(core)) {
-		if (cpu == core)
+		if (cpu == core || available_idle_cpu(cpu))
 			continue;
 
-		if (!available_idle_cpu(cpu))
-			goto unlock;
+		if (idlecore == -1)
+			set_next_idle_core(core);
+
+		goto unlock;
 	}
 
 	set_idle_core(core, per_cpu(smt_id, core));
+
 unlock:
 	rcu_read_unlock();
 }
@@ -6176,7 +6225,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int i
 {
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 	int i, cpu, idle_cpu = -1, nr = INT_MAX;
-	bool has_idle_core = (idle_core != -1);
+	bool has_idle_core = (idle_core != -2);
 	int this = smp_processor_id();
 	struct sched_domain *this_sd;
 	u64 time;
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 7ca3d3d86c2a..a9f5a8ace59e 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -431,6 +431,12 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 
 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
+#ifdef CONFIG_SCHED_SMT
+	int cpu = rq->cpu;
+
+	if (static_branch_likely(&sched_smt_present))
+		set_core_busy(cpu);
+#endif
 }
 
 static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 22fbb50b036e..98c3cfbc5d26 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1115,6 +1115,7 @@ static inline bool is_migration_disabled(struct task_struct *p)
 
 #ifdef CONFIG_SCHED_SMT
 extern void __update_idle_core(struct rq *rq);
+extern void set_core_busy(int cpu);
 
 static inline void update_idle_core(struct rq *rq)
 {
@@ -1124,6 +1125,7 @@ static inline void update_idle_core(struct rq *rq)
 
 #else
 static inline void update_idle_core(struct rq *rq) { }
+static inline void set_core_busy(int cpu) { }
 #endif
 
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-- 
2.18.2
