Message-ID: <20250206202109.384179-3-arighi@nvidia.com>
Date: Thu, 6 Feb 2025 21:15:32 +0100
From: Andrea Righi <arighi@...dia.com>
To: Tejun Heo <tj@...nel.org>,
David Vernet <void@...ifault.com>,
Changwoo Min <changwoo@...lia.com>
Cc: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>,
Ian May <ianm@...dia.com>,
bpf@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 2/5] sched_ext: idle: Introduce SCX_OPS_BUILTIN_IDLE_PER_NODE
Add the new scheduler flag SCX_OPS_BUILTIN_IDLE_PER_NODE, which allows
scx schedulers to choose between using a single global flat idle cpumask
or multiple per-node idle cpumasks (see the illustrative fragment below).
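For illustration only, here is a hypothetical fragment of a BPF
scheduler opting in to the new behavior; the scheduler name and the
skeleton around it are made up for this example and are not part of
this patch:

  /*
   * Hypothetical fragment: opting in to per-node idle cpumasks is
   * just a matter of setting the new flag in the sched_ext_ops
   * .flags field.
   */
  SEC(".struct_ops.link")
  struct sched_ext_ops per_node_example_ops = {
          .flags = SCX_OPS_BUILTIN_IDLE_PER_NODE,
          .name  = "per_node_example",
  };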
This only introduces the flag and the mechanism to enable/disable it,
without affecting any scheduling behavior. Note that the flag is only
meaningful while the built-in idle tracking is active, which
validate_ops() enforces below (see the sketch after this paragraph).
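A sketch of the flag combinations that validate_ops() accepts and
rejects; my_update_idle() is a hypothetical callback used only for
illustration:

  /*
   * Rejected: a custom update_idle() callback replaces the built-in
   * idle tracking, so the per-node idle cpumasks would never be
   * updated.
   */
  .flags       = SCX_OPS_BUILTIN_IDLE_PER_NODE,
  .update_idle = (void *)my_update_idle,

  /*
   * Accepted: SCX_OPS_KEEP_BUILTIN_IDLE keeps the built-in idle
   * tracking alive alongside the custom callback.
   */
  .flags       = SCX_OPS_BUILTIN_IDLE_PER_NODE | SCX_OPS_KEEP_BUILTIN_IDLE,
  .update_idle = (void *)my_update_idle,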
Signed-off-by: Andrea Righi <arighi@...dia.com>
---
kernel/sched/ext.c | 21 +++++++++++++++++++--
kernel/sched/ext_idle.c | 33 +++++++++++++++++++++++++--------
kernel/sched/ext_idle.h | 6 ++++--
3 files changed, 48 insertions(+), 12 deletions(-)
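Note: as with the existing scx_builtin_idle_enabled key, the new
scx_builtin_idle_per_node static key lets idle-selection code branch
with negligible overhead when the feature is off. A minimal sketch of
what a consumer could look like; the helper below and the
per-node/global mask lookups are hypothetical and not introduced by
this patch:

  static const struct cpumask *idle_cpumask_for_node(int node)
  {
          if (static_branch_unlikely(&scx_builtin_idle_per_node))
                  return per_node_idle_mask(node);  /* hypothetical */
          return global_idle_mask();                /* hypothetical */
  }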
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 8a9a30895381a..0063a646124bc 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -125,6 +125,12 @@ enum scx_ops_flags {
*/
SCX_OPS_SWITCH_PARTIAL = 1LLU << 3,
+ /*
+ * If set, enable per-node idle cpumasks. If clear, use a single global
+ * flat idle cpumask.
+ */
+ SCX_OPS_BUILTIN_IDLE_PER_NODE = 1LLU << 4,
+
/*
* CPU cgroup support flags
*/
@@ -134,6 +140,7 @@ enum scx_ops_flags {
SCX_OPS_ENQ_LAST |
SCX_OPS_ENQ_EXITING |
SCX_OPS_SWITCH_PARTIAL |
+ SCX_OPS_BUILTIN_IDLE_PER_NODE |
SCX_OPS_HAS_CGROUP_WEIGHT,
};
@@ -3344,7 +3351,7 @@ static void handle_hotplug(struct rq *rq, bool online)
atomic_long_inc(&scx_hotplug_seq);
if (scx_enabled())
- scx_idle_update_selcpu_topology();
+ scx_idle_update_selcpu_topology(&scx_ops);
if (online && SCX_HAS_OP(cpu_online))
SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
@@ -5116,6 +5123,16 @@ static int validate_ops(const struct sched_ext_ops *ops)
return -EINVAL;
}
+ /*
+ * SCX_OPS_BUILTIN_IDLE_PER_NODE requires the built-in CPU idle
+ * selection policy to be enabled.
+ */
+ if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) &&
+ (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) {
+ scx_ops_error("SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -5240,7 +5257,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
static_branch_enable_cpuslocked(&scx_has_op[i]);
check_hotplug_seq(ops);
- scx_idle_update_selcpu_topology();
+ scx_idle_update_selcpu_topology(ops);
cpus_read_unlock();
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index cb981956005b4..a3f2b00903ac2 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -14,6 +14,9 @@
/* Enable/disable built-in idle CPU selection policy */
DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
+/* Enable/disable per-node idle cpumasks */
+DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_per_node);
+
#ifdef CONFIG_SMP
#ifdef CONFIG_CPUMASK_OFFSTACK
#define CL_ALIGNED_IF_ONSTACK
@@ -204,9 +207,9 @@ static bool llc_numa_mismatch(void)
* CPU belongs to a single LLC domain, and that each LLC domain is entirely
* contained within a single NUMA node.
*/
-void scx_idle_update_selcpu_topology(void)
+void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
{
- bool enable_llc = false, enable_numa = false;
+ bool enable_llc = false, enable_numa = false, enable_idle_node = false;
unsigned int nr_cpus;
s32 cpu = cpumask_first(cpu_online_mask);
@@ -237,13 +240,21 @@ void scx_idle_update_selcpu_topology(void)
* If all CPUs belong to the same NUMA node and the same LLC domain,
* enabling both NUMA and LLC optimizations is unnecessary, as checking
* for an idle CPU in the same domain twice is redundant.
+ *
+ * If SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, ignore the NUMA
+ * optimization, as we would naturally select idle CPUs within
+ * specific NUMA nodes by querying the corresponding per-node cpumask.
*/
- nr_cpus = numa_weight(cpu);
- if (nr_cpus > 0) {
- if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
- enable_numa = true;
- pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
- cpumask_pr_args(numa_span(cpu)), numa_weight(cpu));
+ if (ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) {
+ enable_idle_node = true;
+ } else {
+ nr_cpus = numa_weight(cpu);
+ if (nr_cpus > 0) {
+ if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
+ enable_numa = true;
+ pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
+ cpumask_pr_args(numa_span(cpu)), numa_weight(cpu));
+ }
}
rcu_read_unlock();
@@ -251,6 +262,8 @@ void scx_idle_update_selcpu_topology(void)
str_enabled_disabled(enable_llc));
pr_debug("sched_ext: NUMA idle selection %s\n",
str_enabled_disabled(enable_numa));
+ pr_debug("sched_ext: per-node idle cpumask %s\n",
+ str_enabled_disabled(enable_idle_node));
if (enable_llc)
static_branch_enable_cpuslocked(&scx_selcpu_topo_llc);
@@ -260,6 +273,10 @@ void scx_idle_update_selcpu_topology(void)
static_branch_enable_cpuslocked(&scx_selcpu_topo_numa);
else
static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
+ if (enable_idle_node)
+ static_branch_enable_cpuslocked(&scx_builtin_idle_per_node);
+ else
+ static_branch_disable_cpuslocked(&scx_builtin_idle_per_node);
}
/*
diff --git a/kernel/sched/ext_idle.h b/kernel/sched/ext_idle.h
index 7a13a74815ba7..d005bd22c19a5 100644
--- a/kernel/sched/ext_idle.h
+++ b/kernel/sched/ext_idle.h
@@ -10,19 +10,21 @@
#ifndef _KERNEL_SCHED_EXT_IDLE_H
#define _KERNEL_SCHED_EXT_IDLE_H
+struct sched_ext_ops;
+
extern struct static_key_false scx_builtin_idle_enabled;
#ifdef CONFIG_SMP
extern struct static_key_false scx_selcpu_topo_llc;
extern struct static_key_false scx_selcpu_topo_numa;
-void scx_idle_update_selcpu_topology(void);
+void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops);
void scx_idle_reset_masks(void);
void scx_idle_init_masks(void);
bool scx_idle_test_and_clear_cpu(int cpu);
s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags);
#else /* !CONFIG_SMP */
-static inline void scx_idle_update_selcpu_topology(void) {}
+static inline void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops) {}
static inline void scx_idle_reset_masks(void) {}
static inline void scx_idle_init_masks(void) {}
static inline bool scx_idle_test_and_clear_cpu(int cpu) { return false; }
--
2.48.1