Message-ID: <20250409111539.23791-3-kprateek.nayak@amd.com>
Date: Wed, 9 Apr 2025 11:15:36 +0000
From: K Prateek Nayak <kprateek.nayak@....com>
To: Ingo Molnar <mingo@...hat.com>, Peter Zijlstra <peterz@...radead.org>,
	Juri Lelli <juri.lelli@...hat.com>, Vincent Guittot
	<vincent.guittot@...aro.org>, <linux-kernel@...r.kernel.org>
CC: Dietmar Eggemann <dietmar.eggemann@....com>, Steven Rostedt
	<rostedt@...dmis.org>, Ben Segall <bsegall@...gle.com>, Mel Gorman
	<mgorman@...e.de>, Valentin Schneider <vschneid@...hat.com>, "Gautham R.
 Shenoy" <gautham.shenoy@....com>, Swapnil Sapkal <swapnil.sapkal@....com>, "K
 Prateek Nayak" <kprateek.nayak@....com>
Subject: [RFC PATCH 2/5] sched/fair: Introduce overloaded_mask in sched_domain_shared

Introduce a new cpumask member "overloaded_mask" in sched_domain_shared.
This mask will be used to keep track of overloaded CPUs with pushable
tasks on them, and will later be used by newidle balance to scan only
the overloaded CPUs when looking for a task to pull.

Allocate the mask in __sdt_alloc() alongside sched_domain_shared and
free it in __sdt_free() and destroy_sched_domain().
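
As an illustrative sketch only (not part of this patch; the helper name
and hook point are hypothetical and the series may wire this up
differently), a runqueue could publish its overloaded state into the
shared mask through the existing per-CPU sd_llc_shared pointer, and
newidle balance could then iterate the mask instead of every CPU in
the LLC:

	static inline void update_overloaded_mask(struct rq *rq, bool overloaded)
	{
		struct sched_domain_shared *sds;
		int cpu = cpu_of(rq);

		rcu_read_lock();
		sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
		if (sds) {
			if (overloaded)
				cpumask_set_cpu(cpu, sds->overloaded_mask);
			else
				cpumask_clear_cpu(cpu, sds->overloaded_mask);
		}
		rcu_read_unlock();
	}

	/* Hypothetical consumer in newidle balance: scan only the CPUs
	 * that advertised pushable tasks rather than the whole span. */
	for_each_cpu(cpu, sds->overloaded_mask) {
		/* try to detach and pull a pushable task from cpu */
	}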

Signed-off-by: K Prateek Nayak <kprateek.nayak@....com>
---
 include/linux/sched/topology.h |  1 +
 kernel/sched/topology.c        | 25 ++++++++++++++++++-------
 2 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 7b4301b7235f..2fc3794fd719 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -78,6 +78,7 @@ struct sched_domain_shared {
 	atomic_t	nr_busy_cpus;
 	int		has_idle_cores;
 	int		nr_idle_scan;
+	cpumask_var_t	overloaded_mask;
 };
 
 struct sched_domain {
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index bbc2fc2c7c22..6b1ef953b571 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -638,8 +638,10 @@ static void destroy_sched_domain(struct sched_domain *sd)
 	 */
 	free_sched_groups(sd->groups, 1);
 
-	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
+	if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) {
+		free_cpumask_var(sd->shared->overloaded_mask);
 		kfree(sd->shared);
+	}
 	kfree(sd);
 }
 
@@ -2239,27 +2241,31 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 			return -ENOMEM;
 
 		for_each_cpu(j, cpu_map) {
+			int node = cpu_to_node(j);
 			struct sched_domain *sd;
 			struct sched_domain_shared *sds;
 			struct sched_group *sg;
 			struct sched_group_capacity *sgc;
 
 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
-					GFP_KERNEL, cpu_to_node(j));
+					GFP_KERNEL, node);
 			if (!sd)
 				return -ENOMEM;
 
 			*per_cpu_ptr(sdd->sd, j) = sd;
 
 			sds = kzalloc_node(sizeof(struct sched_domain_shared),
-					GFP_KERNEL, cpu_to_node(j));
+					GFP_KERNEL, node);
 			if (!sds)
 				return -ENOMEM;
 
+			if (!zalloc_cpumask_var_node(&sds->overloaded_mask, GFP_KERNEL, node))
+				return -ENOMEM;
+
 			*per_cpu_ptr(sdd->sds, j) = sds;
 
 			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
-					GFP_KERNEL, cpu_to_node(j));
+					GFP_KERNEL, node);
 			if (!sg)
 				return -ENOMEM;
 
@@ -2268,7 +2274,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 			*per_cpu_ptr(sdd->sg, j) = sg;
 
 			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
-					GFP_KERNEL, cpu_to_node(j));
+					GFP_KERNEL, node);
 			if (!sgc)
 				return -ENOMEM;
 
@@ -2299,8 +2305,13 @@ static void __sdt_free(const struct cpumask *cpu_map)
 				kfree(*per_cpu_ptr(sdd->sd, j));
 			}
 
-			if (sdd->sds)
-				kfree(*per_cpu_ptr(sdd->sds, j));
+			if (sdd->sds) {
+				struct sched_domain_shared *sds = *per_cpu_ptr(sdd->sds, j);
+
+				if (sds)
+					free_cpumask_var(sds->overloaded_mask);
+				kfree(sds);
+			}
 			if (sdd->sg)
 				kfree(*per_cpu_ptr(sdd->sg, j));
 			if (sdd->sgc)
-- 
2.34.1

