Message-ID: <20250915123738.GD3245006@noisy.programming.kicks-ass.net>
Date: Mon, 15 Sep 2025 14:37:38 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Tim Chen <tim.c.chen@...ux.intel.com>
Cc: Ingo Molnar <mingo@...hat.com>, Juri Lelli <juri.lelli@...hat.com>,
	Dietmar Eggemann <dietmar.eggemann@....com>,
	Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
	Valentin Schneider <vschneid@...hat.com>,
	Tim Chen <tim.c.chen@...el.com>,
	Vincent Guittot <vincent.guittot@...aro.org>,
	Libo Chen <libo.chen@...cle.com>,
	Abel Wu <wuyun.abel@...edance.com>, Len Brown <len.brown@...el.com>,
	linux-kernel@...r.kernel.org, Chen Yu <yu.c.chen@...el.com>,
	K Prateek Nayak <kprateek.nayak@....com>,
	"Gautham R . Shenoy" <gautham.shenoy@....com>,
	Zhao Liu <zhao1.liu@...el.com>,
	Vinicius Costa Gomes <vinicius.gomes@...el.com>,
	Arjan Van De Ven <arjan.van.de.ven@...el.com>
Subject: Re: [PATCH v3 1/2] sched: Create architecture specific sched domain
 distances

On Thu, Sep 11, 2025 at 11:30:56AM -0700, Tim Chen wrote:
> Allow architecture-specific sched domain NUMA distances, derived by
> modifying the NUMA node distances, for the purpose of building NUMA
> sched domains.
> 
> The actual NUMA distances are kept separately.  This allows specific
> architectures to modify the NUMA domain levels when building sched
> domains.
> 
> Consolidate the recording of unique NUMA distances in an array into
> sched_record_numa_dist() so the function can be reused to record NUMA
> distances when the NUMA distance metric changes.
> 
> No functional change if no arch-specific NUMA distances are defined.

Keeping both metrics side-by-side is confusing -- and not very well
justified by the above.

Is there any appreciable benefit to mixing the two like this?
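
For reference, the shape the new declaration suggests (a sketch on my
part, assuming a __weak default in the style of arch_asym_cpu_priority();
this is not quoted from the patch) would be:

	/* Default: sched domain distance is the firmware (SLIT) distance. */
	int __weak arch_sched_node_distance(int from, int to)
	{
		return node_distance(from, to);
	}

with an architecture overriding it to feed modified distances into NUMA
sched domain construction only.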

> 
> Co-developed-by: Vinicius Costa Gomes <vinicius.gomes@...el.com>
> Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@...el.com>
> Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
> ---
>  include/linux/sched/topology.h |   2 +
>  kernel/sched/topology.c        | 114 ++++++++++++++++++++++++++++-----
>  2 files changed, 99 insertions(+), 17 deletions(-)
> 
> diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
> index 5263746b63e8..4f58e78ca52e 100644
> --- a/include/linux/sched/topology.h
> +++ b/include/linux/sched/topology.h
> @@ -59,6 +59,8 @@ static inline int cpu_numa_flags(void)
>  #endif
>  
>  extern int arch_asym_cpu_priority(int cpu);
> +extern int arch_sched_node_distance(int from, int to);
> +extern int sched_avg_remote_numa_distance;
>  
>  struct sched_domain_attr {
>  	int relax_domain_level;
> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> index 977e133bb8a4..6c0ff62322cb 100644
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -1591,10 +1591,13 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
>  enum numa_topology_type sched_numa_topology_type;
>  
>  static int			sched_domains_numa_levels;
> +static int			sched_numa_node_levels;
>  static int			sched_domains_curr_level;
>  
>  int				sched_max_numa_distance;
> +int				sched_avg_remote_numa_distance;
>  static int			*sched_domains_numa_distance;
> +static int			*sched_numa_node_distance;
>  static struct cpumask		***sched_domains_numa_masks;
>  #endif /* CONFIG_NUMA */
>  
> @@ -1808,10 +1811,10 @@ bool find_numa_distance(int distance)
>  		return true;
>  
>  	rcu_read_lock();
> -	distances = rcu_dereference(sched_domains_numa_distance);
> +	distances = rcu_dereference(sched_numa_node_distance);
>  	if (!distances)
>  		goto unlock;
> -	for (i = 0; i < sched_domains_numa_levels; i++) {
> +	for (i = 0; i < sched_numa_node_levels; i++) {
>  		if (distances[i] == distance) {
>  			found = true;
>  			break;

I'm assuming (because it's not actually stated anywhere) that
sched_numa_$FOO is based on the SLIT table, while sched_domain_$FOO is
the modified thing.

And you're saying it makes a significant difference to
preferred_group_nid()?
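
If that reading is right, the split would be (sketch; names from the
patch):

	sched_numa_node_distance[]	/* unique node_distance() (SLIT) values */
	sched_domains_numa_distance[]	/* unique arch_sched_node_distance() values */

with find_numa_distance() above now answering against the SLIT-based
array.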

> +static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
> +		int **dist, int *levels)
> +

That's a coding style fail; use cino=(0:0.
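
That is, continuation lines aligned under the opening parenthesis, and no
stray blank line before the brace:

	static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
					  int **dist, int *levels)
	{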

>  {
> -	struct sched_domain_topology_level *tl;
>  	unsigned long *distance_map;
>  	int nr_levels = 0;
>  	int i, j;
>  	int *distances;
> -	struct cpumask ***masks;
>  
>  	/*
>  	 * O(nr_nodes^2) de-duplicating selection sort -- in order to find the
> @@ -1902,17 +1923,17 @@ void sched_init_numa(int offline_node)
>  	 */
>  	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
>  	if (!distance_map)
> -		return;
> +		return -ENOMEM;
>  
>  	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
>  	for_each_cpu_node_but(i, offline_node) {
>  		for_each_cpu_node_but(j, offline_node) {
> -			int distance = node_distance(i, j);
> +			int distance = n_dist(i, j);
>  
>  			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
>  				sched_numa_warn("Invalid distance value range");
>  				bitmap_free(distance_map);
> -				return;
> +				return -EINVAL;
>  			}
>  
>  			bitmap_set(distance_map, distance, 1);
> @@ -1927,17 +1948,66 @@ void sched_init_numa(int offline_node)
>  	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
>  	if (!distances) {
>  		bitmap_free(distance_map);
> -		return;
> +		return -ENOMEM;
>  	}
> -
>  	for (i = 0, j = 0; i < nr_levels; i++, j++) {
>  		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
>  		distances[i] = j;
>  	}
> -	rcu_assign_pointer(sched_domains_numa_distance, distances);
> +	*dist = distances;
> +	*levels = nr_levels;
>  
>  	bitmap_free(distance_map);
>  
> +	return 0;
> +}
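
As a worked example of the bitmap de-dup above (made-up numbers): a
2-node SLIT with distances {10, 21} sets bits 10 and 21 in distance_map,
two bits set means nr_levels = 2, and the find_next_bit() loop fills
distances[] = { 10, 21 } in ascending order.
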
> +
> +static int avg_remote_numa_distance(int offline_node)
> +{
> +	int i, j;
> +	int distance, nr_remote = 0, total_distance = 0;
> +
> +	for_each_cpu_node_but(i, offline_node) {
> +		for_each_cpu_node_but(j, offline_node) {
> +			distance = node_distance(i, j);
> +
> +			if (distance >= REMOTE_DISTANCE) {
> +				nr_remote++;
> +				total_distance += distance;
> +			}
> +		}
> +	}
> +	if (nr_remote)
> +		return total_distance / nr_remote;
> +	else
> +		return REMOTE_DISTANCE;
> +}
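
A quick worked example (made-up SLIT): with two nodes and distances
{ {10, 32}, {32, 10} }, only the two off-diagonal entries are >=
REMOTE_DISTANCE (30), so this returns (32 + 32) / 2 = 32; with no
remote pairs at all it falls back to REMOTE_DISTANCE.
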
> +
> +void sched_init_numa(int offline_node)
> +{
> +	struct sched_domain_topology_level *tl;
> +	int nr_levels, nr_node_levels;
> +	int i, j;
> +	int *distances, *domain_distances;
> +	struct cpumask ***masks;
> +
> +	if (sched_record_numa_dist(offline_node, numa_node_dist, &distances,
> +				   &nr_node_levels))
> +		return;
> +
> +	WRITE_ONCE(sched_avg_remote_numa_distance,
> +		   avg_remote_numa_distance(offline_node));

What is the point of all this? sched_avg_remote_numa_distance isn't
actually used anywhere. I'm thinking it doesn't want to be in this patch
at the very least.
