Message-ID: <20251021145706.61534-3-arighi@nvidia.com>
Date: Tue, 21 Oct 2025 16:55:43 +0200
From: Andrea Righi <arighi@...dia.com>
To: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>,
Tejun Heo <tj@...nel.org>,
David Vernet <void@...ifault.com>,
Changwoo Min <changwoo@...lia.com>,
Joel Fernandes <joelagnelf@...dia.com>,
sched-ext@...ts.linux.dev,
linux-kernel@...r.kernel.org
Subject: [PATCH 2/3] sched: Expose sd_llc_shared->has_idle_cores to other sched classes

Move the logic that manages sd_llc_shared->has_idle_cores into
sched/idle.c and relocate the set/test/update helpers to sched.h, so
that they can be reused by other sched classes (e.g., sched_ext).

No functional changes.

Signed-off-by: Andrea Righi <arighi@...dia.com>
---
kernel/sched/fair.c | 63 +-------------------------------------------
kernel/sched/idle.c | 31 ++++++++++++++++++++++
kernel/sched/sched.h | 55 +++++++++++++++++++++++++++++---------
3 files changed, 74 insertions(+), 75 deletions(-)
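
As a quick illustration of the intended reuse (illustrative only, not part
of this patch): with set_idle_cores()/test_idle_cores() visible from
sched.h, an idle-CPU picker in another sched class can consult the per-LLC
has_idle_cores hint before paying for a full core scan, and clear the hint
when that scan comes up empty, mirroring what select_idle_cpu() in fair.c
does today. The sketch_*() helpers below are hypothetical placeholders for
whatever LLC scan the consuming class implements; the caller must hold
rcu_read_lock() across the helpers, since they use rcu_dereference() on
sd_llc_shared.

/*
 * Illustrative sketch only; sketch_scan_llc_for_idle_core() and
 * sketch_scan_llc_for_idle_cpu() are hypothetical placeholders.
 */
static int sketch_pick_idle_cpu(struct task_struct *p, int prev_cpu)
{
	int cpu = -1;

	rcu_read_lock();

	/* Cheap per-LLC hint: is a fully idle core believed to exist? */
	if (test_idle_cores(prev_cpu)) {
		cpu = sketch_scan_llc_for_idle_core(p, prev_cpu);
		if (cpu < 0)
			/* No idle core found: clear the hint for later wakeups. */
			set_idle_cores(prev_cpu, 0);
	}

	/* Fall back to any idle CPU in the LLC. */
	if (cpu < 0)
		cpu = sketch_scan_llc_for_idle_cpu(p, prev_cpu);

	rcu_read_unlock();

	return cpu;
}
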
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ef0a1bd26450b..64ec1527d1af4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2080,8 +2080,6 @@ numa_type numa_classify(unsigned int imbalance_pct,
}
#ifdef CONFIG_SCHED_SMT
-/* Forward declarations of select_idle_sibling helpers */
-static inline bool test_idle_cores(int cpu);
static inline int numa_idle_core(int idle_core, int cpu)
{
if (!static_branch_likely(&sched_smt_present) ||
@@ -7539,59 +7537,10 @@ static inline int __select_idle_cpu(int cpu, struct task_struct *p)
}
#ifdef CONFIG_SCHED_SMT
-static inline void set_idle_cores(int cpu, int val)
-{
- struct sched_domain_shared *sds;
-
- sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
- if (sds)
- WRITE_ONCE(sds->has_idle_cores, val);
-}
-
-static inline bool test_idle_cores(int cpu)
-{
- struct sched_domain_shared *sds;
-
- sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
- if (sds)
- return READ_ONCE(sds->has_idle_cores);
-
- return false;
-}
-
-/*
- * Scans the local SMT mask to see if the entire core is idle, and records this
- * information in sd_llc_shared->has_idle_cores.
- *
- * Since SMT siblings share all cache levels, inspecting this limited remote
- * state should be fairly cheap.
- */
-void __update_idle_core(struct rq *rq)
-{
- int core = cpu_of(rq);
- int cpu;
-
- rcu_read_lock();
- if (test_idle_cores(core))
- goto unlock;
-
- for_each_cpu(cpu, cpu_smt_mask(core)) {
- if (cpu == core)
- continue;
-
- if (!available_idle_cpu(cpu))
- goto unlock;
- }
-
- set_idle_cores(core, 1);
-unlock:
- rcu_read_unlock();
-}
-
/*
* Scan the entire LLC domain for idle cores; this dynamically switches off if
* there are no idle cores left in the system; tracked through
- * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
+ * sd_llc->shared->has_idle_cores and enabled through update_idle_core().
*/
static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
{
@@ -7645,16 +7594,6 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
}
#else /* !CONFIG_SCHED_SMT: */
-
-static inline void set_idle_cores(int cpu, int val)
-{
-}
-
-static inline bool test_idle_cores(int cpu)
-{
- return false;
-}
-
static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
{
return __select_idle_cpu(core, p);
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index c39b089d4f09b..06b67c88e70e6 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -452,6 +452,37 @@ static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
resched_curr(rq);
}
+#ifdef CONFIG_SCHED_SMT
+/*
+ * Scans the local SMT mask to see if the entire core is idle, and records this
+ * information in sd_llc_shared->has_idle_cores.
+ *
+ * Since SMT siblings share all cache levels, inspecting this limited remote
+ * state should be fairly cheap.
+ */
+void __update_idle_core(struct rq *rq)
+{
+ int core = cpu_of(rq);
+ int cpu;
+
+ rcu_read_lock();
+ if (test_idle_cores(core))
+ goto unlock;
+
+ for_each_cpu(cpu, cpu_smt_mask(core)) {
+ if (cpu == core)
+ continue;
+
+ if (!available_idle_cpu(cpu))
+ goto unlock;
+ }
+
+ set_idle_cores(core, 1);
+unlock:
+ rcu_read_unlock();
+}
+#endif /* CONFIG_SCHED_SMT */
+
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
dl_server_update_idle_time(rq, prev);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1f5d07067f60a..b943ebbc3fcb1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1579,19 +1579,6 @@ do { \
flags = _raw_spin_rq_lock_irqsave(rq); \
} while (0)
-#ifdef CONFIG_SCHED_SMT
-extern void __update_idle_core(struct rq *rq);
-
-static inline void update_idle_core(struct rq *rq)
-{
- if (static_branch_unlikely(&sched_smt_present))
- __update_idle_core(rq);
-}
-
-#else /* !CONFIG_SCHED_SMT: */
-static inline void update_idle_core(struct rq *rq) { }
-#endif /* !CONFIG_SCHED_SMT */
-
#ifdef CONFIG_FAIR_GROUP_SCHED
static inline struct task_struct *task_of(struct sched_entity *se)
@@ -2069,6 +2056,48 @@ DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
extern struct static_key_false sched_asym_cpucapacity;
extern struct static_key_false sched_cluster_active;
+#ifdef CONFIG_SCHED_SMT
+static inline void set_idle_cores(int cpu, int val)
+{
+ struct sched_domain_shared *sds;
+
+ sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
+ if (sds)
+ WRITE_ONCE(sds->has_idle_cores, val);
+}
+
+static inline bool test_idle_cores(int cpu)
+{
+ struct sched_domain_shared *sds;
+
+ sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
+ if (sds)
+ return READ_ONCE(sds->has_idle_cores);
+
+ return false;
+}
+
+extern void __update_idle_core(struct rq *rq);
+
+static inline void update_idle_core(struct rq *rq)
+{
+ if (static_branch_unlikely(&sched_smt_present))
+ __update_idle_core(rq);
+}
+
+#else /* !CONFIG_SCHED_SMT: */
+static inline void set_idle_cores(int cpu, int val)
+{
+}
+
+static inline bool test_idle_cores(int cpu)
+{
+ return false;
+}
+
+static inline void update_idle_core(struct rq *rq) { }
+#endif /* !CONFIG_SCHED_SMT */
+
static __always_inline bool sched_asym_cpucap_active(void)
{
return static_branch_unlikely(&sched_asym_cpucapacity);
--
2.51.1