Message-ID: <20251201091308.761711-6-vincent.guittot@linaro.org>
Date: Mon,  1 Dec 2025 10:13:07 +0100
From: Vincent Guittot <vincent.guittot@...aro.org>
To: mingo@...hat.com,
	peterz@...radead.org,
	juri.lelli@...hat.com,
	dietmar.eggemann@....com,
	rostedt@...dmis.org,
	bsegall@...gle.com,
	mgorman@...e.de,
	vschneid@...hat.com,
	linux-kernel@...r.kernel.org,
	pierre.gondois@....com,
	kprateek.nayak@....com
Cc: qyousef@...alina.io,
	hongyan.xia2@....com,
	christian.loehle@....com,
	luis.machado@....com,
	Vincent Guittot <vincent.guittot@...aro.org>
Subject: [RFC PATCH 5/6 v7] sched/fair: Enable idle core tracking for !SMT

Enable the has_idle_cores tracking at LLC level for !SMT systems, for
which a CPU equals a core.

We don't enable the has_idle_core path of select_idle_cpu() in order to
stay conservative and avoid scanning all CPUs of the LLC.

For now, has_idle_cores can be cleared even if a CPU is idle because of
SIS_UTIL, but this looks reasonable as the probability of finding an
idle CPU is low anyway.
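
For reference, a stand-alone user-space sketch of how the hint is meant
to behave on a !SMT system after this change: the flag is set whenever a
CPU enters idle and cleared when a wakeup scan finds no idle CPU, so
later wakeups can skip the scan entirely. Only the set_idle_cores() /
test_idle_cores() names mirror the patch; the scan loop and main() are
purely illustrative, not kernel code.

/*
 * Simplified model of the per-LLC has_idle_cores hint on !SMT
 * (illustrative only, not part of the patch).
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool has_idle_cores;		/* models sds->has_idle_cores */
static bool cpu_idle[NR_CPUS];		/* whether each CPU is idle   */

static void set_idle_cores(bool val)	{ has_idle_cores = val; }
static bool test_idle_cores(void)	{ return has_idle_cores; }

/* update_idle_core() on !SMT: a CPU going idle sets the LLC-wide hint. */
static void cpu_enters_idle(int cpu)
{
	cpu_idle[cpu] = true;
	set_idle_cores(true);
}

/* select_idle_sibling()-like scan: clear the hint when nothing idle is found. */
static int wakeup_pick_cpu(void)
{
	if (!test_idle_cores())
		return -1;			/* skip the scan entirely */

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu_idle[cpu]) {
			cpu_idle[cpu] = false;	/* task placed here */
			return cpu;
		}
	}
	set_idle_cores(false);			/* no idle CPU: remember that */
	return -1;
}

int main(void)
{
	cpu_enters_idle(2);
	printf("first wakeup  -> CPU %d\n", wakeup_pick_cpu());	/*  2 */
	printf("second wakeup -> CPU %d\n", wakeup_pick_cpu());	/* -1, hint cleared */
	printf("hint is now %d\n", test_idle_cores());		/*  0 */
	return 0;
}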

Signed-off-by: Vincent Guittot <vincent.guittot@...aro.org>
---
 kernel/sched/fair.c  | 29 +++++++----------------------
 kernel/sched/sched.h | 42 +++++++++++++++++++++++++++++-------------
 2 files changed, 36 insertions(+), 35 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4e94a4cb8caa..9af8d0a61856 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7500,19 +7500,6 @@ static inline int __select_idle_cpu(int cpu, struct task_struct *p)
 	return -1;
 }
 
-#ifdef CONFIG_SCHED_SMT
-DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-EXPORT_SYMBOL_GPL(sched_smt_present);
-
-static inline void set_idle_cores(int cpu, int val)
-{
-	struct sched_domain_shared *sds;
-
-	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
-	if (sds)
-		WRITE_ONCE(sds->has_idle_cores, val);
-}
-
 static inline bool test_idle_cores(int cpu)
 {
 	struct sched_domain_shared *sds;
@@ -7524,6 +7511,10 @@ static inline bool test_idle_cores(int cpu)
 	return false;
 }
 
+#ifdef CONFIG_SCHED_SMT
+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+EXPORT_SYMBOL_GPL(sched_smt_present);
+
 /*
  * Scans the local SMT mask to see if the entire core is idle, and records this
  * information in sd_llc_shared->has_idle_cores.
@@ -7611,15 +7602,6 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 
 #else /* !CONFIG_SCHED_SMT: */
 
-static inline void set_idle_cores(int cpu, int val)
-{
-}
-
-static inline bool test_idle_cores(int cpu)
-{
-	return false;
-}
-
 static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
 {
 	return __select_idle_cpu(core, p);
@@ -7885,6 +7867,9 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	if ((unsigned)i < nr_cpumask_bits)
 		return i;
 
+	if (!sched_smt_active())
+		set_idle_cores(target, 0);
+
 	/*
 	 * For cluster machines which have lower sharing cache like L2 or
 	 * LLC Tag, we tend to find an idle CPU in the target's cluster
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 697bd654298a..b9e228333d5e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1591,19 +1591,6 @@ do {						\
 	flags = _raw_spin_rq_lock_irqsave(rq);	\
 } while (0)
 
-#ifdef CONFIG_SCHED_SMT
-extern void __update_idle_core(struct rq *rq);
-
-static inline void update_idle_core(struct rq *rq)
-{
-	if (static_branch_unlikely(&sched_smt_present))
-		__update_idle_core(rq);
-}
-
-#else /* !CONFIG_SCHED_SMT: */
-static inline void update_idle_core(struct rq *rq) { }
-#endif /* !CONFIG_SCHED_SMT */
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 static inline struct task_struct *task_of(struct sched_entity *se)
@@ -2091,6 +2078,35 @@ static __always_inline bool sched_asym_cpucap_active(void)
 	return static_branch_unlikely(&sched_asym_cpucapacity);
 }
 
+static inline void set_idle_cores(int cpu, int val)
+{
+	struct sched_domain_shared *sds;
+
+	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
+	if (sds)
+		WRITE_ONCE(sds->has_idle_cores, val);
+}
+
+#ifdef CONFIG_SCHED_SMT
+extern void __update_idle_core(struct rq *rq);
+
+static inline void update_idle_core(struct rq *rq)
+{
+	if (static_branch_unlikely(&sched_smt_present))
+		__update_idle_core(rq);
+	else
+		set_idle_cores(cpu_of(rq), 1);
+
+}
+
+#else /* !CONFIG_SCHED_SMT: */
+static inline void update_idle_core(struct rq *rq)
+{
+	set_idle_cores(cpu_of(rq), 1);
+}
+#endif /* !CONFIG_SCHED_SMT */
+
+
 struct sched_group_capacity {
 	atomic_t		ref;
 	/*
-- 
2.43.0

