Message-ID: <20241217094156.577262-2-arighi@nvidia.com>
Date: Tue, 17 Dec 2024 10:32:26 +0100
From: Andrea Righi <arighi@...dia.com>
To: Tejun Heo <tj@...nel.org>,
David Vernet <void@...ifault.com>,
Changwoo Min <changwoo@...lia.com>
Cc: Yury Norov <yury.norov@...il.com>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH 1/6] sched/topology: introduce for_each_numa_hop_node() / sched_numa_hop_node()
Introduce for_each_numa_hop_node() and sched_numa_hop_node() to iterate
over node IDs in order of increasing NUMA distance from a given starting
node.
These iterators are similar to for_each_numa_hop_mask() and
sched_numa_hop_mask(), but instead of providing a cpumask at each
iteration, they provide a node ID.
Example usage:

  nodemask_t hop_nodes = NODE_MASK_NONE;
  int start = cpu_to_node(smp_processor_id());

  for_each_numa_hop_node(node, start, hop_nodes, N_ONLINE)
          pr_info("node (%d, %d) -> %d\n",
                  start, node, node_distance(start, node));
Simulating the following NUMA topology in virtme-ng:
$ numactl -H
available: 4 nodes (0-3)
node 0 cpus: 0 1
node 0 size: 1006 MB
node 0 free: 928 MB
node 1 cpus: 2 3
node 1 size: 1007 MB
node 1 free: 986 MB
node 2 cpus: 4 5
node 2 size: 889 MB
node 2 free: 862 MB
node 3 cpus: 6 7
node 3 size: 1006 MB
node 3 free: 983 MB
node distances:
node   0   1   2   3
  0:  10  51  31  41
  1:  51  10  21  61
  2:  31  21  10  11
  3:  41  61  11  10
The output of the example above (on node 0) is the following:
[ 84.953644] node (0, 0) -> 10
[ 84.953712] node (0, 2) -> 31
[ 84.953764] node (0, 3) -> 41
[ 84.953817] node (0, 1) -> 51
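As an illustration of the intended usage pattern, a hypothetical consumer
(not part of this patch; the idle-CPU policy below is just an example)
could scan CPUs in order of NUMA proximity and stop at the first match,
avoiding remote nodes whenever a closer CPU is available:

  /*
   * Hypothetical example (not part of this patch): return the first idle
   * CPU found scanning nodes from the closest to the farthest relative
   * to @start, or -EBUSY if no idle CPU is found.
   */
  static int pick_idle_cpu_numa(int start)
  {
          nodemask_t hop_nodes = NODE_MASK_NONE;
          int cpu;

          for_each_numa_hop_node(node, start, hop_nodes, N_ONLINE) {
                  /* Nodes are visited in order of increasing distance */
                  for_each_cpu(cpu, cpumask_of_node(node)) {
                          if (available_idle_cpu(cpu))
                                  return cpu;
                  }
          }

          return -EBUSY;
  }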
Signed-off-by: Andrea Righi <arighi@...dia.com>
---
include/linux/topology.h | 28 ++++++++++++++++++++++-
kernel/sched/topology.c | 49 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 76 insertions(+), 1 deletion(-)
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 52f5850730b3..d9014d90580d 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -248,12 +248,18 @@ static inline const struct cpumask *cpu_cpu_mask(int cpu)
#ifdef CONFIG_NUMA
int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node);
extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops);
-#else
+extern int sched_numa_hop_node(nodemask_t *hop_nodes, int start, unsigned int state);
+#else /* !CONFIG_NUMA */
static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
{
return cpumask_nth_and(cpu, cpus, cpu_online_mask);
}
+static inline int sched_numa_hop_node(nodemask_t *hop_nodes, int start, unsigned int state)
+{
+ return NUMA_NO_NODE;
+}
+
static inline const struct cpumask *
sched_numa_hop_mask(unsigned int node, unsigned int hops)
{
@@ -261,6 +267,26 @@ sched_numa_hop_mask(unsigned int node, unsigned int hops)
}
#endif /* CONFIG_NUMA */
+/**
+ * for_each_numa_hop_node - iterate over NUMA nodes at increasing hop
+ * distances from a given starting node.
+ * @__node: the iteration variable, representing the current NUMA node.
+ * @__start: the NUMA node to start the iteration from.
+ * @__hop_nodes: a nodemask_t, initialized to NODE_MASK_NONE, used to
+ * track the visited nodes.
+ * @__state: state of the NUMA nodes to iterate over.
+ *
+ * Requires rcu_read_lock() to be held.
+ *
+ * This macro iterates over the NUMA nodes in order of increasing distance
+ * from @__start; the iteration ends when sched_numa_hop_node() returns
+ * NUMA_NO_NODE, i.e. when all the nodes in @__state have been visited.
+ */
+#define for_each_numa_hop_node(__node, __start, __hop_nodes, __state) \
+ for (int __node = __start; \
+ __node != NUMA_NO_NODE; \
+ __node = sched_numa_hop_node(&(__hop_nodes), __start, __state))
+
/**
* for_each_numa_hop_mask - iterate over cpumasks of increasing NUMA distance
* from a given node.
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 9748a4c8d668..8e77c235ad9a 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2185,6 +2185,55 @@ int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
}
EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu);
+/**
+ * sched_numa_hop_node - Find the NUMA node at the closest hop distance
+ * from node @start.
+ *
+ * @hop_nodes: a pointer to a nodemask_t tracking the visited nodes.
+ * @start: the NUMA node to start the hop search from.
+ * @state: the node state to filter nodes by.
+ *
+ * This function iterates over all the NUMA nodes in the given @state and
+ * computes their distance to the starting node. It returns the node that
+ * is the closest in terms of hop distance and that has not been visited
+ * yet (not set in @hop_nodes). If such a node is found, it is marked as
+ * visited in the @hop_nodes bitmask.
+ *
+ * A node is only considered if it is different from @start and not
+ * already set in @hop_nodes. The distance to @start is evaluated using
+ * node_distance() and the node with the minimum distance is selected,
+ * so that repeated calls visit the nodes in order of increasing distance.
+ *
+ * Returns the NUMA node ID closest in terms of hop distance from the
+ * @start node, or NUMA_NO_NODE if no node is found (i.e. all the nodes
+ * have been visited).
+ */
+int sched_numa_hop_node(nodemask_t *hop_nodes, int start, unsigned int state)
+{
+ int dist, n, min_node, min_dist;
+
+ if (state >= NR_NODE_STATES)
+ return NUMA_NO_NODE;
+
+ min_node = NUMA_NO_NODE;
+ min_dist = INT_MAX;
+
+ for_each_node_state(n, state) {
+ if (n == start || node_isset(n, *hop_nodes))
+ continue;
+ dist = node_distance(start, n);
+ if (dist < min_dist) {
+ min_dist = dist;
+ min_node = n;
+ }
+ }
+ if (min_node != NUMA_NO_NODE)
+ node_set(min_node, *hop_nodes);
+
+ return min_node;
+}
+EXPORT_SYMBOL_GPL(sched_numa_hop_node);
+
/**
* sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from
* @node
--
2.47.1