Message-Id: <1541767840-93588-3-git-send-email-steven.sistare@oracle.com>
Date: Fri, 9 Nov 2018 04:50:32 -0800
From: Steve Sistare <steven.sistare@...cle.com>
To: mingo@...hat.com, peterz@...radead.org
Cc: subhra.mazumdar@...cle.com, dhaval.giani@...cle.com,
daniel.m.jordan@...cle.com, pavel.tatashin@...rosoft.com,
matt@...eblueprint.co.uk, umgwanakikbuti@...il.com,
riel@...hat.com, jbacik@...com, juri.lelli@...hat.com,
valentin.schneider@....com, vincent.guittot@...aro.org,
quentin.perret@....com, steven.sistare@...cle.com,
linux-kernel@...r.kernel.org
Subject: [PATCH v3 02/10] sched/topology: Provide hooks to allocate data shared per LLC

Add functions sd_llc_alloc_all() and sd_llc_free_all() to allocate and
free data pointed to by struct sched_domain_shared at the last-level-cache
domain. sd_llc_alloc_all() is called after the SD hierarchy is known, to
eliminate the unnecessary allocations that would occur if we instead
allocated in __sdt_alloc() and then figured out which shared nodes are
redundant.

Signed-off-by: Steve Sistare <steven.sistare@...cle.com>
---
kernel/sched/topology.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 74 insertions(+), 1 deletion(-)
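
[ Not part of the patch: an illustrative sketch of how a later patch
  might give the empty hooks real work. The overload_cpus field is a
  hypothetical member of struct sched_domain_shared, used here only to
  show the intended allocate/free pattern; the name and type are
  assumptions, not code from this series. ]

	static int sd_llc_alloc(struct sched_domain *sd)
	{
		struct sched_domain_shared *sds = sd->shared;
		int nid = cpu_to_node(cpumask_first(sched_domain_span(sd)));

		/* Place the per-LLC data on the LLC's own NUMA node. */
		sds->overload_cpus =			/* hypothetical field */
			kzalloc_node(cpumask_size(), GFP_KERNEL, nid);
		if (!sds->overload_cpus)
			return 1;	/* nonzero makes sd_llc_alloc_all() fail */

		return 0;
	}

	static void sd_llc_free(struct sched_domain *sd)
	{
		struct sched_domain_shared *sds = sd->shared;

		if (!sds)
			return;

		kfree(sds->overload_cpus);		/* hypothetical field */
		sds->overload_cpus = NULL;
	}

[ sd_llc_alloc_all() below calls sd_llc_alloc() once per LLC, at the
  highest domain with SD_SHARE_PKG_RESOURCES, so such data is allocated
  once per cache domain rather than once per CPU. ]
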
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8d7f15b..3e72ce0 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -10,6 +10,12 @@
 static cpumask_var_t sched_domains_tmpmask;
 static cpumask_var_t sched_domains_tmpmask2;
 
+struct s_data;
+static int sd_llc_alloc(struct sched_domain *sd);
+static void sd_llc_free(struct sched_domain *sd);
+static int sd_llc_alloc_all(const struct cpumask *cpu_map, struct s_data *d);
+static void sd_llc_free_all(const struct cpumask *cpu_map);
+
 #ifdef CONFIG_SCHED_DEBUG
 
 static int __init sched_debug_setup(char *str)
@@ -361,8 +367,10 @@ static void destroy_sched_domain(struct sched_domain *sd)
 	 */
 	free_sched_groups(sd->groups, 1);
 
-	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
+	if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) {
+		sd_llc_free(sd);
 		kfree(sd->shared);
+	}
 	kfree(sd);
 }
@@ -996,6 +1004,7 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 		free_percpu(d->sd);
 		/* Fall through */
 	case sa_sd_storage:
+		sd_llc_free_all(cpu_map);
 		__sdt_free(cpu_map);
 		/* Fall through */
 	case sa_none:
@@ -1610,6 +1619,62 @@ static void __sdt_free(const struct cpumask *cpu_map)
 	}
 }
 
+static int sd_llc_alloc(struct sched_domain *sd)
+{
+	/* Allocate sd->shared data here. Empty for now. */
+
+	return 0;
+}
+
+static void sd_llc_free(struct sched_domain *sd)
+{
+	struct sched_domain_shared *sds = sd->shared;
+
+	if (!sds)
+		return;
+
+	/* Free data here. Empty for now. */
+}
+
+static int sd_llc_alloc_all(const struct cpumask *cpu_map, struct s_data *d)
+{
+	struct sched_domain *sd, *hsd;
+	int i;
+
+	for_each_cpu(i, cpu_map) {
+		/* Find highest domain that shares resources */
+		hsd = NULL;
+		for (sd = *per_cpu_ptr(d->sd, i); sd; sd = sd->parent) {
+			if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
+				break;
+			hsd = sd;
+		}
+		if (hsd && sd_llc_alloc(hsd))
+			return 1;
+	}
+
+	return 0;
+}
+
+static void sd_llc_free_all(const struct cpumask *cpu_map)
+{
+	struct sched_domain_topology_level *tl;
+	struct sched_domain *sd;
+	struct sd_data *sdd;
+	int j;
+
+	for_each_sd_topology(tl) {
+		sdd = &tl->data;
+		if (!sdd->sd)
+			continue;
+		for_each_cpu(j, cpu_map) {
+			sd = *per_cpu_ptr(sdd->sd, j);
+			if (sd)
+				sd_llc_free(sd);
+		}
+	}
+}
+
 static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
 		struct sched_domain *child, int dflags, int cpu)
@@ -1769,6 +1834,14 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
 		}
 	}
 
+	/*
+	 * Allocate shared sd data at last level cache. Must be done after
+	 * domains are built above, but before the data is used in
+	 * cpu_attach_domain and descendants below.
+	 */
+	if (sd_llc_alloc_all(cpu_map, &d))
+		goto error;
+
 	/* Attach the domains */
 	rcu_read_lock();
 	for_each_cpu(i, cpu_map) {
--
1.8.3.1