Message-ID: <20250313093746.6760-3-kprateek.nayak@amd.com>
Date: Thu, 13 Mar 2025 09:37:40 +0000
From: K Prateek Nayak <kprateek.nayak@....com>
To: Ingo Molnar <mingo@...hat.com>, Peter Zijlstra <peterz@...radead.org>,
	Juri Lelli <juri.lelli@...hat.com>, Vincent Guittot
	<vincent.guittot@...aro.org>, Chen Yu <yu.c.chen@...el.com>,
	<linux-kernel@...r.kernel.org>
CC: Dietmar Eggemann <dietmar.eggemann@....com>,
	Steven Rostedt <rostedt@...dmis.org>, Ben Segall <bsegall@...gle.com>,
	Mel Gorman <mgorman@...e.de>, Valentin Schneider <vschneid@...hat.com>,
	David Vernet <void@...ifault.com>,
	"Gautham R. Shenoy" <gautham.shenoy@....com>,
	"Swapnil Sapkal" <swapnil.sapkal@....com>,
	Shrikanth Hegde <sshegde@...ux.ibm.com>,
	"K Prateek Nayak" <kprateek.nayak@....com>
Subject: [RFC PATCH 2/8] sched/topology: Introduce sg->shared

The sched_group(s) of a particular sched_domain are created from the
sched_domain struct of the child domain. Attach the corresponding child
domain's sched_domain_shared struct to each sched_group.

This shared struct will be used in subsequent commits to propagate
sched_group statistics up the sched_domain hierarchy and optimize load
balancing.

Signed-off-by: K Prateek Nayak <kprateek.nayak@....com>
---
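Note for reviewers: below is a minimal userspace sketch of the
sg->shared refcounting scheme this patch introduces, in case it helps
review. C11 atomics stand in for the kernel's atomic_t, and the demo_*
names are illustrative only; they are not part of the patch.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct sched_domain_shared: just the refcount here. */
struct demo_shared {
	atomic_int ref;
};

/* Stand-in for struct sched_group with the new ->shared member. */
struct demo_group {
	struct demo_shared *shared;
};

/* Mirrors link_sg_shared(): attaching takes a reference. */
static void demo_link(struct demo_group *g, struct demo_shared *s)
{
	if (!s)
		return;

	g->shared = s;
	atomic_fetch_add(&s->ref, 1);
}

/* Mirrors free_sg_shared(): dropping the last reference frees the object. */
static void demo_unlink(struct demo_group *g)
{
	if (g->shared && atomic_fetch_sub(&g->shared->ref, 1) == 1)
		free(g->shared);

	g->shared = NULL;
}

int main(void)
{
	struct demo_shared *s = calloc(1, sizeof(*s));
	struct demo_group a = {0}, b = {0};

	atomic_store(&s->ref, 1);	/* creator's reference */
	demo_link(&a, s);		/* ref == 2 */
	demo_link(&b, s);		/* ref == 3 */

	demo_unlink(&a);		/* ref == 2, object stays */
	demo_unlink(&b);		/* ref == 1, object stays */

	/* Creator drops its reference last; the object is freed once. */
	if (atomic_fetch_sub(&s->ref, 1) == 1)
		free(s);

	printf("all references dropped, object freed exactly once\n");
	return 0;
}

The invariant is that every demo_link() (link_sg_shared() in the patch)
is balanced by a demo_unlink() (free_sg_shared()), so the shared object
is freed exactly once when its last user goes away.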
 kernel/sched/sched.h    |  3 +++
 kernel/sched/topology.c | 27 +++++++++++++++++++++++++++
 2 files changed, 30 insertions(+)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 023b844159c9..38aa4cba5d1f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2089,6 +2089,9 @@ struct sched_group {
 	int			asym_prefer_cpu;	/* CPU of highest priority in group */
 	int			flags;
 
+	/* sd->shared of the domain from which this group was created */
+	struct sched_domain_shared *shared;
+
 	/*
 	 * The CPUs this group covers.
 	 *
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 815474823b3f..508ee8aa492b 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -612,6 +612,23 @@ static struct root_domain *alloc_rootdomain(void)
 	return rd;
 }
 
+static void link_sg_shared(struct sched_group *sg, struct sched_domain_shared *sds)
+{
+	if (!sds)
+		return;
+
+	sg->shared = sds;
+	atomic_inc(&sds->ref);
+}
+
+static void free_sg_shared(struct sched_group *sg)
+{
+	if (sg->shared && atomic_dec_and_test(&sg->shared->ref))
+		kfree(sg->shared);
+
+	sg->shared = NULL;
+}
+
 static void free_sched_groups(struct sched_group *sg, int free_sgc)
 {
 	struct sched_group *tmp, *first;
@@ -626,6 +643,8 @@ static void free_sched_groups(struct sched_group *sg, int free_sgc)
 		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
 			kfree(sg->sgc);
 
+		free_sg_shared(sg);
+
 		if (atomic_dec_and_test(&sg->ref))
 			kfree(sg);
 		sg = tmp;
@@ -746,6 +765,9 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 			if (parent->parent) {
 				parent->parent->child = tmp;
 				parent->parent->groups->flags = tmp->flags;
+
+				free_sg_shared(parent->parent->groups);
+				link_sg_shared(parent->parent->groups, tmp->shared);
 			}
 
 			/*
@@ -773,6 +795,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 			 * the child is being destroyed.
 			 */
 			do {
+				free_sg_shared(sg);
 				sg->flags = 0;
 			} while (sg != sd->groups);
 
@@ -972,10 +995,12 @@ build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
 	if (!sg)
 		return NULL;
 
+	sg->shared = NULL;
 	sg_span = sched_group_span(sg);
 	if (sd->child) {
 		cpumask_copy(sg_span, sched_domain_span(sd->child));
 		sg->flags = sd->child->flags;
+		link_sg_shared(sg, sd->child->shared);
 	} else {
 		cpumask_copy(sg_span, sched_domain_span(sd));
 	}
@@ -1225,9 +1250,11 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
 	if (already_visited)
 		return sg;
 
+	sg->shared = NULL;
 	if (child) {
 		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
 		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
+		link_sg_shared(sg, child->shared);
 		sg->flags = child->flags;
 	} else {
 		cpumask_set_cpu(cpu, sched_group_span(sg));
-- 
2.43.0

