Message-ID: <20260120113246.27987-3-kprateek.nayak@amd.com>
Date: Tue, 20 Jan 2026 11:32:40 +0000
From: K Prateek Nayak <kprateek.nayak@....com>
To: Ingo Molnar <mingo@...hat.com>, Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>, Vincent Guittot
<vincent.guittot@...aro.org>, <linux-kernel@...r.kernel.org>
CC: Dietmar Eggemann <dietmar.eggemann@....com>, Steven Rostedt
<rostedt@...dmis.org>, Ben Segall <bsegall@...gle.com>, Mel Gorman
<mgorman@...e.de>, Valentin Schneider <vschneid@...hat.com>, Chen Yu
<yu.c.chen@...el.com>, Shrikanth Hegde <sshegde@...ux.ibm.com>, "Gautham R.
Shenoy" <gautham.shenoy@....com>, K Prateek Nayak <kprateek.nayak@....com>
Subject: [PATCH v3 2/8] sched/topology: Allocate per-CPU sched_domain_shared in s_data
The "sched_domain_shared" object is allocated for every topology level
in __sdt_alloc() and is freed post sched domain rebuild if they aren't
assigned during sd_init().
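
For context, a simplified sketch of the current per-topology-level
allocation (illustrative only, not the verbatim __sdt_alloc() code):

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;
		int j;

		/* One per-CPU pointer array per topology level ... */
		sdd->sds = alloc_percpu(struct sched_domain_shared *);

		/* ... and one object per CPU at each level. */
		for_each_cpu(j, cpu_map) {
			*per_cpu_ptr(sdd->sds, j) =
				kzalloc_node(sizeof(struct sched_domain_shared),
					     GFP_KERNEL, cpu_to_node(j));
		}
	}

(Error handling omitted for brevity.)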
"sd->shared" is only assigned for SD_SHARE_LLC domains and out of all
the assigned objects, only "sd_llc_shared" is ever used by the
scheduler.
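
For reference, sd_init() takes a reference roughly along these lines
(simplified sketch of the mainline code, shown only to illustrate why
most of the per-level objects remain unused):

	if (sd->flags & SD_SHARE_LLC) {
		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
		atomic_inc(&sd->shared->ref);
		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
	}

Domains without SD_SHARE_LLC never claim their object, so those
allocations are freed again once the rebuild completes.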
Since only "sd_llc_shared" is ever used, and since SD_SHARE_LLC domains
never overlap, allocate a single per-CPU set of "sched_domain_shared"
objects in s_data instead of one set per topology level.
A subsequent commit will use the degeneration path to correctly assign
"sd->shared" to the topmost SD_SHARE_LLC domain.
No functional changes are expected at this point.
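
With this change, the allocation order in
__visit_domain_allocation_hell() and the matching unwind in
__free_domain_allocs() become (see the diff below):

	__sdt_alloc()        - failure unwinds from sa_sd_storage
	__sds_alloc()        - failure unwinds from sa_sd_shared
	alloc_percpu(d->sd)  - failure unwinds from sa_sd_shared
	alloc_rootdomain()   - failure unwinds from sa_sd

i.e. __sds_free() always runs before __sdt_free() on the error and
teardown paths.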
Signed-off-by: K Prateek Nayak <kprateek.nayak@....com>
---
Changelog rfc v2..v3:
o Broke off from a single large patch. Previously
https://lore.kernel.org/lkml/20251208092744.32737-3-kprateek.nayak@amd.com/
---
kernel/sched/topology.c | 48 ++++++++++++++++++++++++++++++++++++++++-
1 file changed, 47 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 649674bb6c3c..623e8835d322 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -776,6 +776,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
}
struct s_data {
+ struct sched_domain_shared * __percpu *sds;
struct sched_domain * __percpu *sd;
struct root_domain *rd;
};
@@ -783,6 +784,7 @@ struct s_data {
enum s_alloc {
sa_rootdomain,
sa_sd,
+ sa_sd_shared,
sa_sd_storage,
sa_none,
};
@@ -1529,6 +1531,9 @@ static void set_domain_attribute(struct sched_domain *sd,
static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);
+static void __sds_free(struct s_data *d, const struct cpumask *cpu_map);
+static int __sds_alloc(struct s_data *d, const struct cpumask *cpu_map);
+
static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
const struct cpumask *cpu_map)
{
@@ -1540,6 +1545,9 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
case sa_sd:
free_percpu(d->sd);
fallthrough;
+ case sa_sd_shared:
+ __sds_free(d, cpu_map);
+ fallthrough;
case sa_sd_storage:
__sdt_free(cpu_map);
fallthrough;
@@ -1555,9 +1563,11 @@ __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
if (__sdt_alloc(cpu_map))
return sa_sd_storage;
+ if (__sds_alloc(d, cpu_map))
+ return sa_sd_shared;
d->sd = alloc_percpu(struct sched_domain *);
if (!d->sd)
- return sa_sd_storage;
+ return sa_sd_shared;
d->rd = alloc_rootdomain();
if (!d->rd)
return sa_sd;
@@ -2458,6 +2468,42 @@ static void __sdt_free(const struct cpumask *cpu_map)
}
}
+static int __sds_alloc(struct s_data *d, const struct cpumask *cpu_map)
+{
+ int j;
+
+ d->sds = alloc_percpu(struct sched_domain_shared *);
+ if (!d->sds)
+ return -ENOMEM;
+
+ for_each_cpu(j, cpu_map) {
+ struct sched_domain_shared *sds;
+
+ sds = kzalloc_node(sizeof(struct sched_domain_shared),
+ GFP_KERNEL, cpu_to_node(j));
+ if (!sds)
+ return -ENOMEM;
+
+ *per_cpu_ptr(d->sds, j) = sds;
+ }
+
+ return 0;
+}
+
+static void __sds_free(struct s_data *d, const struct cpumask *cpu_map)
+{
+ int j;
+
+ if (!d->sds)
+ return;
+
+ for_each_cpu(j, cpu_map)
+ kfree(*per_cpu_ptr(d->sds, j));
+
+ free_percpu(d->sds);
+ d->sds = NULL;
+}
+
static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
const struct cpumask *cpu_map, struct sched_domain_attr *attr,
struct sched_domain *child, int cpu)
--
2.34.1