Message-ID: <20241205212627.193016-5-arighi@nvidia.com>
Date: Thu, 5 Dec 2024 22:00:26 +0100
From: Andrea Righi <arighi@...dia.com>
To: Tejun Heo <tj@...nel.org>,
David Vernet <void@...ifault.com>
Cc: Yury Norov <yury.norov@...il.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH 4/4] sched_ext: Introduce NUMA aware idle cpu kfunc helpers
Add the following kfuncs to provide scx schedulers direct access to
per-node idle cpumask information:
const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
s32 scx_bpf_pick_idle_cpu_node(int node,
const cpumask_t *cpus_allowed, u64 flags)
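For illustration only (not part of this patch), a minimal ops.select_cpu()
sketch using the new helpers could look roughly like the following; the
scheduler name, how the preferred node is chosen, and the fallback policy
are placeholders:

  s32 BPF_STRUCT_OPS(mysched_select_cpu, struct task_struct *p,
		     s32 prev_cpu, u64 wake_flags)
  {
	int node = 0;	/* placeholder: preferred NUMA node for @p */
	s32 cpu;

	/* Prefer a fully idle core on the target node. */
	cpu = scx_bpf_pick_idle_cpu_node(node, p->cpus_ptr,
					 SCX_PICK_IDLE_CORE);
	if (cpu >= 0)
		return cpu;

	/* Otherwise claim any idle CPU on the target node. */
	cpu = scx_bpf_pick_idle_cpu_node(node, p->cpus_ptr, 0);
	if (cpu >= 0)
		return cpu;

	/* Fall back to the existing system-wide helper. */
	cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
	return cpu >= 0 ? cpu : prev_cpu;
  }

As with scx_bpf_get_idle_cpumask(), the cpumasks returned by the new
*_node() getters are acquired kptrs and must be released with
scx_bpf_put_idle_cpumask().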
Signed-off-by: Andrea Righi <arighi@...dia.com>
---
kernel/sched/ext.c | 85 +++++++++++++++++++++++-
tools/sched_ext/include/scx/common.bpf.h | 3 +
2 files changed, 86 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index d0d57323bcfc..b3d20e396b43 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -433,6 +433,7 @@ struct sched_ext_ops {
* - scx_bpf_select_cpu_dfl()
* - scx_bpf_test_and_clear_cpu_idle()
* - scx_bpf_pick_idle_cpu()
+ * - scx_bpf_pick_idle_cpu_node()
*
* The user also must implement ops.select_cpu() as the default
* implementation relies on scx_bpf_select_cpu_dfl().
@@ -955,6 +956,8 @@ static struct cpumask *get_idle_cpumask_node(int node)
if (!static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node))
return idle_masks[0]->cpu;
+ if (node < 0 || node >= num_possible_nodes())
+ return NULL;
return idle_masks[node]->cpu;
}
@@ -963,6 +966,8 @@ static struct cpumask *get_idle_smtmask_node(int node)
if (!static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node))
return idle_masks[0]->smt;
+ if (node < 0 || node >= num_possible_nodes())
+ return NULL;
return idle_masks[node]->smt;
}
@@ -7499,11 +7504,32 @@ __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
*/
}
+/**
+ * scx_bpf_get_idle_cpumask_node - Get a referenced kptr to the idle-tracking
+ * per-CPU cpumask of a target NUMA node.
+ *
+ * Returns an empty cpumask if idle tracking is not enabled, if @node is not
+ * valid, or running on a UP kernel.
+ */
+__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
+{
+ if (!static_branch_likely(&scx_builtin_idle_enabled)) {
+ scx_ops_error("built-in idle tracking is disabled");
+ return cpu_none_mask;
+ }
+ if (!static_branch_likely(&scx_builtin_idle_per_node)) {
+ scx_ops_error("per-node idle tracking is disabled");
+ return cpu_none_mask;
+ }
+
+ return get_idle_cpumask_node(node) ? : cpu_none_mask;
+}
/**
* scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
* per-CPU cpumask of the current NUMA node.
*
- * Returns NULL if idle tracking is not enabled, or running on a UP kernel.
+ * Returns an empty cpumask if idle tracking is not enabled, or running on a UP
+ * kernel.
*/
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
{
@@ -7515,12 +7541,35 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
return get_curr_idle_cpumask();
}
+/**
+ * scx_bpf_get_idle_smtmask_node - Get a referenced kptr to the idle-tracking,
+ * per-physical-core cpumask of a target NUMA node. Can be used to determine
+ * if an entire physical core is free.
+ *
+ * Returns an empty cpumask if idle tracking is not enabled, if @node is not
+ * valid, or running on a UP kernel.
+ */
+__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
+{
+ if (!static_branch_likely(&scx_builtin_idle_enabled)) {
+ scx_ops_error("built-in idle tracking is disabled");
+ return cpu_none_mask;
+ }
+ if (!static_branch_likely(&scx_builtin_idle_per_node)) {
+ scx_ops_error("per-node idle tracking is disabled");
+ return cpu_none_mask;
+ }
+
+ return get_idle_smtmask_node(node) ? : cpu_none_mask;
+}
+
/**
* scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
* per-physical-core cpumask of the current NUMA node. Can be used to determine
* if an entire physical core is free.
*
- * Returns NULL if idle tracking is not enabled, or running on a UP kernel.
+ * Returns an empty cpumask if idle tracking is not enabled, or running on a UP
+ * kernel.
*/
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
{
@@ -7569,6 +7618,35 @@ __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
return false;
}
+/**
+ * scx_bpf_pick_idle_cpu_node - Pick and claim an idle cpu from a NUMA node
+ * @node: target NUMA node
+ * @cpus_allowed: Allowed cpumask
+ * @flags: %SCX_PICK_IDLE_CPU_* flags
+ *
+ * Pick and claim an idle cpu in @cpus_allowed from the NUMA node @node.
+ * Returns the picked idle cpu number on success. -%EBUSY if no matching cpu
+ * was found.
+ *
+ * Unavailable if ops.update_idle() is implemented and
+ * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
+ */
+__bpf_kfunc s32 scx_bpf_pick_idle_cpu_node(int node, const struct cpumask *cpus_allowed,
+ u64 flags)
+{
+ if (!static_branch_likely(&scx_builtin_idle_enabled)) {
+ scx_ops_error("built-in idle tracking is disabled");
+ return -EBUSY;
+ }
+ if (!static_branch_likely(&scx_builtin_idle_per_node)) {
+ scx_ops_error("per-node idle tracking is disabled");
+ return -EBUSY;
+ }
+
+ return scx_pick_idle_cpu_from_node(node, cpus_allowed, flags);
+}
+
/**
* scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
* @cpus_allowed: Allowed cpumask
@@ -7709,10 +7787,13 @@ BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
+BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask_node, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
+BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask_node, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu_node, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index 625f5b046776..41b12f9c6b36 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -63,10 +63,13 @@ const struct cpumask *scx_bpf_get_possible_cpumask(void) __ksym __weak;
const struct cpumask *scx_bpf_get_online_cpumask(void) __ksym __weak;
void scx_bpf_put_cpumask(const struct cpumask *cpumask) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym;
+const struct cpumask *scx_bpf_get_idle_cpumask_node(int node) __ksym;
const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym;
+const struct cpumask *scx_bpf_get_idle_smtmask_node(int node) __ksym;
void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym;
bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym;
s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
+s32 scx_bpf_pick_idle_cpu_node(int node, const cpumask_t *cpus_allowed, u64 flags) __ksym;
s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
--
2.47.1