[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <e11163c7-9e23-4556-9a3a-962222978686@amd.com>
Date: Fri, 12 Sep 2025 08:53:29 +0530
From: K Prateek Nayak <kprateek.nayak@....com>
To: Tim Chen <tim.c.chen@...ux.intel.com>, Peter Zijlstra
<peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>
CC: Juri Lelli <juri.lelli@...hat.com>, Dietmar Eggemann
<dietmar.eggemann@....com>, Ben Segall <bsegall@...gle.com>, Mel Gorman
<mgorman@...e.de>, Valentin Schneider <vschneid@...hat.com>, Tim Chen
<tim.c.chen@...el.com>, Vincent Guittot <vincent.guittot@...aro.org>, "Libo
Chen" <libo.chen@...cle.com>, Abel Wu <wuyun.abel@...edance.com>, Len Brown
<len.brown@...el.com>, <linux-kernel@...r.kernel.org>, Chen Yu
<yu.c.chen@...el.com>, "Gautham R . Shenoy" <gautham.shenoy@....com>, "Zhao
Liu" <zhao1.liu@...el.com>, Vinicius Costa Gomes <vinicius.gomes@...el.com>,
Arjan Van De Ven <arjan.van.de.ven@...el.com>
Subject: Re: [PATCH v3 1/2] sched: Create architecture specific sched domain
distances
Hello Tim,
On 9/12/2025 12:00 AM, Tim Chen wrote:
> +static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
> + int **dist, int *levels)
> +
nit. Is the blank line above intentional?
Also, personally, I prefer breaking the two lines above as:
static int
sched_record_numa_dist(int offline_node, int (*n_dist)(int, int), int **dist, int *levels)
{
...
}
> {
> - struct sched_domain_topology_level *tl;
> unsigned long *distance_map;
Since we are breaking this out and adding return values, can we also
clean up the bitmap_free() before every return by using __free(bitmap), like:
(Only build tested)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 6c0ff62322cb..baa79e79ced8 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1910,9 +1910,8 @@ static int numa_node_dist(int i, int j)
static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
int **dist, int *levels)
-
{
- unsigned long *distance_map;
+ unsigned long *distance_map __free(bitmap) = NULL;
int nr_levels = 0;
int i, j;
int *distances;
@@ -1932,7 +1931,6 @@ static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
sched_numa_warn("Invalid distance value range");
- bitmap_free(distance_map);
return -EINVAL;
}
@@ -1946,19 +1944,17 @@ static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
- if (!distances) {
- bitmap_free(distance_map);
+ if (!distances)
return -ENOMEM;
- }
+
for (i = 0, j = 0; i < nr_levels; i++, j++) {
j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
distances[i] = j;
}
+
*dist = distances;
*levels = nr_levels;
- bitmap_free(distance_map);
-
return 0;
}
---
> int nr_levels = 0;
> int i, j;
> int *distances;
> - struct cpumask ***masks;
>
> /*
> * O(nr_nodes^2) de-duplicating selection sort -- in order to find the
> @@ -1902,17 +1923,17 @@ void sched_init_numa(int offline_node)
> */
> distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
> if (!distance_map)
> - return;
> + return -ENOMEM;
>
> bitmap_zero(distance_map, NR_DISTANCE_VALUES);
> for_each_cpu_node_but(i, offline_node) {
> for_each_cpu_node_but(j, offline_node) {
> - int distance = node_distance(i, j);
> + int distance = n_dist(i, j);
>
> if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
> sched_numa_warn("Invalid distance value range");
> bitmap_free(distance_map);
> - return;
> + return -EINVAL;
> }
>
> bitmap_set(distance_map, distance, 1);
> @@ -1927,17 +1948,66 @@ void sched_init_numa(int offline_node)
> distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
> if (!distances) {
> bitmap_free(distance_map);
> - return;
> + return -ENOMEM;
> }
> -
> for (i = 0, j = 0; i < nr_levels; i++, j++) {
> j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
> distances[i] = j;
> }
> - rcu_assign_pointer(sched_domains_numa_distance, distances);
> + *dist = distances;
> + *levels = nr_levels;
>
> bitmap_free(distance_map);
>
> + return 0;
> +}
> +
> +static int avg_remote_numa_distance(int offline_node)
> +{
> + int i, j;
> + int distance, nr_remote = 0, total_distance = 0;
> +
> + for_each_cpu_node_but(i, offline_node) {
> + for_each_cpu_node_but(j, offline_node) {
> + distance = node_distance(i, j);
> +
> + if (distance >= REMOTE_DISTANCE) {
> + nr_remote++;
> + total_distance += distance;
> + }
> + }
> + }
> + if (nr_remote)
> + return total_distance / nr_remote;
> + else
> + return REMOTE_DISTANCE;
> +}
> +
> +void sched_init_numa(int offline_node)
> +{
> + struct sched_domain_topology_level *tl;
> + int nr_levels, nr_node_levels;
> + int i, j;
> + int *distances, *domain_distances;
> + struct cpumask ***masks;
> +
> + if (sched_record_numa_dist(offline_node, numa_node_dist, &distances,
> + &nr_node_levels))
> + return;
> +
> + WRITE_ONCE(sched_avg_remote_numa_distance,
> + avg_remote_numa_distance(offline_node));
nit.
Could you add a small comment here noting that arch_sched_node_distance()
may depend on sched_avg_remote_numa_distance, which therefore needs to be
initialized correctly before computing domain_distances?
Apart from those nitpicks, the changes look good to me. Please feel free
to include:
Reviewed-by: K Prateek Nayak <kprateek.nayak@....com>
--
Thanks and Regards,
Prateek
> +
> + if (sched_record_numa_dist(offline_node,
> + arch_sched_node_distance, &domain_distances,
> + &nr_levels)) {
> + kfree(distances);
> + return;
> + }
> + rcu_assign_pointer(sched_numa_node_distance, distances);
> + WRITE_ONCE(sched_max_numa_distance, distances[nr_node_levels - 1]);
> + WRITE_ONCE(sched_numa_node_levels, nr_node_levels);
> +
> /*
> * 'nr_levels' contains the number of unique distances
> *
Powered by blists - more mailing lists