Message-ID: <20260122161647.142704-2-realwujing@gmail.com>
Date: Thu, 22 Jan 2026 11:16:47 -0500
From: Qiliang Yuan <realwujing@...il.com>
To: mingo@...hat.com,
	peterz@...radead.org,
	juri.lelli@...hat.com,
	vincent.guittot@...aro.org
Cc: dietmar.eggemann@....com,
	rostedt@...dmis.org,
	bsegall@...gle.com,
	mgorman@...e.de,
	vschneid@...hat.com,
	linux-kernel@...r.kernel.org,
	yuanql9@...natelecom.cn,
	Qiliang Yuan <realwujing@...il.com>
Subject: [PATCH] sched/numa: Optimize NUMA placement algorithm complexity from O(Nodes) to O(Active_Nodes)

On systems with a large number of NUMA nodes, periodic scanning of all online
nodes for fault decay and task placement becomes a bottleneck.

Introduce 'numa_faults_nodes_mask' in task_struct to track the nodes on
which a task has actually incurred NUMA faults. Replacing the
for_each_online_node() loop in task_numa_placement() and the
for_each_node_state(nid, N_CPU) loop in task_numa_migrate() with
for_each_node_mask() over this mask shrinks the search space and the
fault-decay overhead, especially for tasks whose memory footprint is
localized to a few nodes.

Bits are set in task_numa_fault() when a fault is recorded against a
node and cleared in task_numa_placement() once that node's decayed
fault counts reach zero, so the mask only covers nodes with
outstanding faults.
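
To make the intent concrete, here is a minimal userspace sketch of the
idea, not kernel code: the uint64_t mask, scan_faulted_nodes() and the
bit-walking loop are illustrative stand-ins for nodemask_t and the
for_each_node_mask() iteration used by the patch.

/*
 * Illustrative userspace model (not kernel code): walk only the set
 * bits of a per-task "faulted nodes" mask instead of every node.
 * A uint64_t stands in for nodemask_t (64 nodes max in this toy).
 */
#include <stdint.h>
#include <stdio.h>

static void scan_faulted_nodes(uint64_t faulted_mask)
{
	/* Analogous to for_each_node_mask(): one iteration per set bit. */
	for (uint64_t m = faulted_mask; m; m &= m - 1)
		printf("decay/score node %d\n", __builtin_ctzll(m));
}

int main(void)
{
	/* The task has faulted on nodes 0, 3 and 17 only. */
	uint64_t faulted = (1ULL << 0) | (1ULL << 3) | (1ULL << 17);

	/* O(active nodes) work instead of O(all nodes). */
	scan_faulted_nodes(faulted);
	return 0;
}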

Signed-off-by: Qiliang Yuan <realwujing@...il.com>
Signed-off-by: Qiliang Yuan <yuanql9@...natelecom.cn>
---
 include/linux/sched.h |  1 +
 kernel/sched/fair.c   | 18 +++++++++++++++---
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d395f2810fac..2c426e10c9d5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1397,6 +1397,7 @@ struct task_struct {
 	 */
 	unsigned long			*numa_faults;
 	unsigned long			total_numa_faults;
+	nodemask_t			numa_faults_nodes_mask;
 
 	/*
 	 * numa_faults_locality tracks if faults recorded during the last
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e71302282671..44cf35c43684 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2538,8 +2538,9 @@ static int task_numa_migrate(struct task_struct *p)
 	 */
 	ng = deref_curr_numa_group(p);
 	if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
-		for_each_node_state(nid, N_CPU) {
-			if (nid == env.src_nid || nid == p->numa_preferred_nid)
+		for_each_node_mask(nid, p->numa_faults_nodes_mask) {
+			if (nid == env.src_nid || nid == p->numa_preferred_nid ||
+			    !node_state(nid, N_CPU))
 				continue;
 
 			dist = node_distance(env.src_nid, env.dst_nid);
@@ -2892,11 +2893,12 @@ static void task_numa_placement(struct task_struct *p)
 	}
 
 	/* Find the node with the highest number of faults */
-	for_each_online_node(nid) {
+	for_each_node_mask(nid, p->numa_faults_nodes_mask) {
 		/* Keep track of the offsets in numa_faults array */
 		int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
 		unsigned long faults = 0, group_faults = 0;
 		int priv;
+		bool node_has_faults = false;
 
 		for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
 			long diff, f_diff, f_weight;
@@ -2928,6 +2930,10 @@ static void task_numa_placement(struct task_struct *p)
 			p->numa_faults[cpu_idx] += f_diff;
 			faults += p->numa_faults[mem_idx];
 			p->total_numa_faults += diff;
+
+			if (p->numa_faults[mem_idx] || p->numa_faults[cpu_idx])
+				node_has_faults = true;
+
 			if (ng) {
 				/*
 				 * safe because we can only change our own group
@@ -2952,6 +2958,9 @@ static void task_numa_placement(struct task_struct *p)
 			max_faults = group_faults;
 			max_nid = nid;
 		}
+
+		if (!node_has_faults)
+			node_clear(nid, p->numa_faults_nodes_mask);
 	}
 
 	/* Cannot migrate task to CPU-less node */
@@ -3209,6 +3218,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 
 	p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
 	p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
+	node_set(mem_node, p->numa_faults_nodes_mask);
+	node_set(cpu_node, p->numa_faults_nodes_mask);
 	p->numa_faults_locality[local] += pages;
 }
 
@@ -3545,6 +3556,7 @@ void init_numa_balancing(u64 clone_flags, struct task_struct *p)
 	/* Protect against double add, see task_tick_numa and task_numa_work */
 	p->numa_work.next		= &p->numa_work;
 	p->numa_faults			= NULL;
+	nodes_clear(p->numa_faults_nodes_mask);
 	p->numa_pages_migrated		= 0;
 	p->total_numa_faults		= 0;
 	RCU_INIT_POINTER(p->numa_group, NULL);
-- 
2.51.0
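
For reference, a hedged standalone model of the mask lifecycle the diff
implements: record_fault() mirrors the node_set() calls added to
task_numa_fault(), and decay_and_prune() mirrors the node_clear() added
to task_numa_placement() once a node's decayed counts drop to zero. The
names, the single per-node counter and the halving decay are
simplifications for illustration, not the kernel's actual bookkeeping.

#include <stdint.h>
#include <stdio.h>

#define MAX_NODES 64

struct task_model {
	unsigned long faults[MAX_NODES];	/* per-node fault counts */
	uint64_t      faulted_mask;		/* bit set => node has faults */
};

/* Record a fault: bump the counter and mark the node (cf. node_set()). */
static void record_fault(struct task_model *t, int nid, unsigned long pages)
{
	t->faults[nid] += pages;
	t->faulted_mask |= 1ULL << nid;
}

/* Decay counters and prune empty nodes from the mask (cf. node_clear()). */
static void decay_and_prune(struct task_model *t)
{
	for (uint64_t m = t->faulted_mask; m; m &= m - 1) {
		int nid = __builtin_ctzll(m);

		t->faults[nid] /= 2;
		if (!t->faults[nid])
			t->faulted_mask &= ~(1ULL << nid);
	}
}

int main(void)
{
	struct task_model t = { 0 };

	record_fault(&t, 2, 1);
	record_fault(&t, 5, 8);
	decay_and_prune(&t);	/* node 2 decays to zero and is pruned */

	printf("mask after decay: %#llx\n",
	       (unsigned long long)t.faulted_mask);
	return 0;
}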

