Message-ID: <616f43d534c7c043220d032700ce72e4a7c740aa.camel@linux.intel.com>
Date: Mon, 08 Sep 2025 11:23:01 -0700
From: Tim Chen <tim.c.chen@...ux.intel.com>
To: "Chen, Yu C" <yu.c.chen@...el.com>
Cc: Juri Lelli <juri.lelli@...hat.com>, Dietmar Eggemann
 <dietmar.eggemann@....com>, Ben Segall <bsegall@...gle.com>, Mel Gorman
 <mgorman@...e.de>, Valentin Schneider <vschneid@...hat.com>, Tim Chen
 <tim.c.chen@...el.com>, Vincent Guittot <vincent.guittot@...aro.org>, Libo
 Chen <libo.chen@...cle.com>, Abel Wu <wuyun.abel@...edance.com>, Len Brown
 <len.brown@...el.com>, linux-kernel@...r.kernel.org, K Prateek Nayak
 <kprateek.nayak@....com>, "Gautham R . Shenoy" <gautham.shenoy@....com>, 
 Zhao Liu <zhao1.liu@...el.com>, Vinicius Costa Gomes
 <vinicius.gomes@...el.com>, Arjan Van De Ven <arjan.van.de.ven@...el.com>,
 Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>
Subject: Re: [PATCH v2 1/2] sched: Create architecture specific sched domain
 distances

On Mon, 2025-09-08 at 00:28 +0800, Chen, Yu C wrote:
> On 9/6/2025 2:36 AM, Tim Chen wrote:

... snip ...
> > -void sched_init_numa(int offline_node)
> > +/*
> > + * Architecture could simplify NUMA distance, to avoid
> > + * creating too many NUMA levels.
> > + */
> > +int __weak arch_sched_node_distance(int from, int to)
> > +{
> > +	return node_distance(from, to);
> > +}
> > +
> > +static int numa_node_dist(int i, int j)
> > +{
> > +	return node_distance(i, j);
> > +}
> > +
> 
> numa_node_dist() seems to be used only once, by
> sched_record_numa_dist(). Would it be possible to
> use node_distance() directly, i.e.
> sched_record_numa_dist(offline_node, node_distance, &distances,
> 				   &max_dist, &nr_node_levels)?

Otherwise I would need to pass a flag to sched_record_numa_dist() to
choose which distance function to use.  I am okay either way; I chose
the current method because it keeps sched_record_numa_dist() simpler.
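
To make the trade-off concrete, here is a toy userspace sketch (the
2-node distance table and the names below are made up, not from the
patch) of how passing the distance function as a callback keeps the
recording helper free of a mode flag:

/*
 * Toy userspace sketch, not kernel code: the 2-node distance table and
 * helper names are invented.  It only illustrates why taking the
 * distance function as a callback avoids a flag inside the helper.
 */
#include <stdio.h>

static int toy_dist[2][2] = { { 10, 20 }, { 20, 10 } };

/* raw firmware-reported distance */
static int raw_dist(int i, int j)
{
	return toy_dist[i][j];
}

/* architecture-simplified distance (collapse all remote distances) */
static int simplified_dist(int i, int j)
{
	return toy_dist[i][j] >= 20 ? 20 : 10;
}

/* one helper, parameterized by the distance function */
static int record_max_dist(int (*n_dist)(int, int))
{
	int i, j, max = 0;

	for (i = 0; i < 2; i++)
		for (j = 0; j < 2; j++)
			if (n_dist(i, j) > max)
				max = n_dist(i, j);
	return max;
}

int main(void)
{
	/* the same helper serves both distance views, no flag needed */
	printf("raw max distance:  %d\n", record_max_dist(raw_dist));
	printf("arch max distance: %d\n", record_max_dist(simplified_dist));
	return 0;
}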


> 
> > +static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
> > +		int **dist, int *maximum_dist, int *levels)
> > +
> >   {
> > -	struct sched_domain_topology_level *tl;
> >   	unsigned long *distance_map;
> >   	int nr_levels = 0;
> >   	int i, j;
> >   	int *distances;
> > -	struct cpumask ***masks;
> > +	int max_dist = 0;
> >   
> > 
... snip ...

> > +static int avg_remote_numa_distance(int offline_node)
> > +{
> > +	int i, j;
> > +	int distance, nr_remote = 0, total_distance = 0;
> > +
> > +	for_each_cpu_node_but(i, offline_node) {
> > +		for_each_cpu_node_but(j, offline_node) {
> > +			distance = node_distance(i, j);
> > +
> > +			if (distance >= REMOTE_DISTANCE) {
> > +				nr_remote++;
> > +				total_distance += distance;
> > +			}
> > +		}
> > +	}
> > +	if (nr_remote)
> > +		return total_distance / nr_remote;
> > +	else
> > +		return REMOTE_DISTANCE;
> > +}
> > +
> > +void sched_init_numa(int offline_node)
> > +{
> > +	struct sched_domain_topology_level *tl;
> > +	int nr_levels, nr_node_levels;
> > +	int i, j;
> > +	int *distances, *domain_distances;
> > +	int max_dist;
> > +	struct cpumask ***masks;
> > +
> > +	if (sched_record_numa_dist(offline_node, numa_node_dist, &distances,
> > +				   &max_dist, &nr_node_levels))
> > +		return;
> > +
> > +	WRITE_ONCE(sched_avg_remote_numa_distance,
> > +		   avg_remote_numa_distance(offline_node));
> > +
> > +	if (sched_record_numa_dist(offline_node,
> > +				   arch_sched_node_distance, &domain_distances,
> > +				   NULL, &nr_levels)) {
> > +		kfree(distances);
> > +		return;
> > +	}
> > +	rcu_assign_pointer(sched_numa_node_distance, distances);
> > +	WRITE_ONCE(sched_numa_node_levels, nr_node_levels);
> > +
> >   	/*
> >   	 * 'nr_levels' contains the number of unique distances
> >   	 *
> > @@ -1954,6 +2028,8 @@ void sched_init_numa(int offline_node)
> >   	 *
> >   	 * We reset it to 'nr_levels' at the end of this function.
> >   	 */
> > +	rcu_assign_pointer(sched_domains_numa_distance, domain_distances);
> > +
> >   	sched_domains_numa_levels = 0;
> >   
> >   	masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
> > @@ -1979,10 +2055,13 @@ void sched_init_numa(int offline_node)
> >   			masks[i][j] = mask;
> >   
> >   			for_each_cpu_node_but(k, offline_node) {
> > -				if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
> > +				if (sched_debug() &&
> > +				    (arch_sched_node_distance(j, k) !=
> > +				     arch_sched_node_distance(k, j)))
> >   					sched_numa_warn("Node-distance not symmetric");
> >   
> > -				if (node_distance(j, k) > sched_domains_numa_distance[i])
> > +				if (arch_sched_node_distance(j, k) >
> > +					sched_domains_numa_distance[i])
> >   					continue;
> >   
> >   				cpumask_or(mask, mask, cpumask_of_node(k));
> > @@ -2022,7 +2101,7 @@ void sched_init_numa(int offline_node)
> >   	sched_domain_topology = tl;
> >   
> >   	sched_domains_numa_levels = nr_levels;
> > -	WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]);
> > +	WRITE_ONCE(sched_max_numa_distance, max_dist);
> >   
> 
> Would it be possible to use
> WRITE_ONCE(sched_max_numa_distance, distances[nr_node_levels - 1]);
> so we can simplify the code by removing the introduced 'max_dist'
> from both sched_record_numa_dist() and sched_init_numa()?

Sure, I think that simplifies sched_record_numa_dist().
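
The next version would then end up with something like the below
(sketch only, assuming 'distances' is the sorted array filled in by
the node_distance() pass, so its last entry is the maximum):

	/*
	 * The last entry of the sorted raw-distance array is the
	 * maximum, so max_dist no longer needs to be plumbed through
	 * sched_record_numa_dist().
	 */
	WRITE_ONCE(sched_max_numa_distance, distances[nr_node_levels - 1]);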


Tim
