[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250206202109.384179-2-arighi@nvidia.com>
Date: Thu, 6 Feb 2025 21:15:31 +0100
From: Andrea Righi <arighi@...dia.com>
To: Tejun Heo <tj@...nel.org>,
David Vernet <void@...ifault.com>,
Changwoo Min <changwoo@...lia.com>
Cc: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>,
Ian May <ianm@...dia.com>,
bpf@...r.kernel.org,
linux-kernel@...r.kernel.org,
Yury Norov <yury.norov@...il.com>
Subject: [PATCH 1/5] sched/topology: Introduce for_each_numa_node() iterator
Introduce for_each_numa_node() and sched_numa_node() helpers to iterate
over node IDs in order of increasing NUMA distance from a given starting
node.
These iterator functions are similar to for_each_numa_hop_mask() and
sched_numa_hop_mask(), but instead of providing a cpumask at each
iteration, they provide a node ID.
Example usage:
nodemask_t visited = NODE_MASK_NONE;
int start = cpu_to_node(smp_processor_id());
for_each_numa_node(node, start, visited, N_ONLINE)
pr_info("node (%d, %d) -> %d\n",
start, node, node_distance(start, node));
On a system with equidistant nodes:
$ numactl -H
...
node distances:
node 0 1 2 3
0: 10 20 20 20
1: 20 10 20 20
2: 20 20 10 20
3: 20 20 20 10
Output of the example above (on node 0):
[ 7.367022] node (0, 0) -> 10
[ 7.367151] node (0, 1) -> 20
[ 7.367186] node (0, 2) -> 20
[ 7.367247] node (0, 3) -> 20
On a system with non-equidistant nodes (simulated using virtme-ng):
$ numactl -H
...
node distances:
node 0 1 2 3
0: 10 51 31 41
1: 51 10 21 61
2: 31 21 10 11
3: 41 61 11 10
Output of the example above (on node 0):
[ 8.953644] node (0, 0) -> 10
[ 8.953712] node (0, 2) -> 31
[ 8.953764] node (0, 3) -> 41
[ 8.953817] node (0, 1) -> 51
Cc: Yury Norov <yury.norov@...il.com>
Signed-off-by: Andrea Righi <arighi@...dia.com>
---
include/linux/topology.h | 31 ++++++++++++++++++++++++++++-
kernel/sched/topology.c | 42 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 72 insertions(+), 1 deletion(-)
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 52f5850730b3e..0c82b913a8814 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -248,12 +248,18 @@ static inline const struct cpumask *cpu_cpu_mask(int cpu)
#ifdef CONFIG_NUMA
int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node);
extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops);
-#else
+extern int sched_numa_node(nodemask_t *visited, int start, unsigned int state);
+#else /* !CONFIG_NUMA */
static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
{
return cpumask_nth_and(cpu, cpus, cpu_online_mask);
}
+static inline int sched_numa_node(nodemask_t *visited, int start, unsigned int state)
+{
+ return MAX_NUMNODES;
+}
+
static inline const struct cpumask *
sched_numa_hop_mask(unsigned int node, unsigned int hops)
{
@@ -261,6 +267,29 @@ sched_numa_hop_mask(unsigned int node, unsigned int hops)
}
#endif /* CONFIG_NUMA */
+/**
+ * for_each_numa_node - iterate over NUMA nodes at increasing hop distances
+ * from a given starting node.
+ * @node: the iteration variable, representing the current NUMA node.
+ * @start: the NUMA node to start the iteration from.
+ * @visited: a nodemask_t to track the visited nodes.
+ * @state: state of NUMA nodes to iterate.
+ *
+ * This macro iterates over NUMA nodes in increasing distance from
+ * @start and yields MAX_NUMNODES when all the nodes have been
+ * visited.
+ *
+ * The difference between for_each_node() and for_each_numa_node() is that
+ * the former iterates over nodes in no particular order, whereas
+ * the latter iterates over nodes in increasing order of distance.
+ *
+ * Requires rcu_lock to be held.
+ */
+#define for_each_numa_node(node, start, visited, state) \
+ for (node = start; \
+ node != MAX_NUMNODES; \
+ node = sched_numa_node(&(visited), start, state))
+
/**
* for_each_numa_hop_mask - iterate over cpumasks of increasing NUMA distance
* from a given node.
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index da33ec9e94ab2..e1d0a33415fb5 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2183,6 +2183,48 @@ int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
}
EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu);
+/**
+ * sched_numa_node - Find the NUMA node at the closest distance from
+ * node @start.
+ *
+ * @visited: a pointer to a nodemask_t representing the visited nodes.
+ * @start: the node to start the search from.
+ * @state: the node state to filter nodes by.
+ *
+ * This function iterates over all nodes in the given state and calculates
+ * the distance to the starting node. It returns the node that is the
+ * closest in terms of distance that has not already been considered (not
+ * set in @visited and not the starting node). If the node is found, it is
+ * marked as visited in the @visited node mask.
+ *
+ * Returns the node ID closest in terms of hop distance from the @start
+ * node, or MAX_NUMNODES if no node is found (or all nodes have been
+ * visited).
+ */
+int sched_numa_node(nodemask_t *visited, int start, unsigned int state)
+{
+ int dist, n, min_node, min_dist;
+
+ min_node = MAX_NUMNODES;
+ min_dist = INT_MAX;
+
+	/* Find the nearest unvisited node */
+ for_each_node_state(n, state) {
+ if (n == start || node_isset(n, *visited))
+ continue;
+ dist = node_distance(start, n);
+ if (dist < min_dist) {
+ min_dist = dist;
+ min_node = n;
+ }
+ }
+ if (min_node != MAX_NUMNODES)
+ node_set(min_node, *visited);
+
+ return min_node;
+}
+EXPORT_SYMBOL_GPL(sched_numa_node);
+
/**
* sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from
* @node
--
2.48.1
Powered by blists - more mailing lists