Message-Id: <1431459549-18343-17-git-send-email-morten.rasmussen@arm.com>
Date: Tue, 12 May 2015 20:38:51 +0100
From: Morten Rasmussen <morten.rasmussen@....com>
To: peterz@...radead.org, mingo@...hat.com
Cc: vincent.guittot@...aro.org,
Dietmar Eggemann <Dietmar.Eggemann@....com>,
yuyang.du@...el.com, preeti@...ux.vnet.ibm.com,
mturquette@...aro.org, rjw@...ysocki.net,
Juri Lelli <Juri.Lelli@....com>, sgurrappadi@...dia.com,
pang.xunlei@....com.cn, linux-kernel@...r.kernel.org,
linux-pm@...r.kernel.org, morten.rasmussen@....com
Subject: [RFCv4 PATCH 16/34] sched: Allocate and initialize energy data structures
From: Dietmar Eggemann <dietmar.eggemann@....com>
The per-sched-group sched_group_energy structure and its related
idle_state and capacity_state arrays are allocated like the other sched
domain (sd) hierarchy data structures. This includes freeing
sched_group_energy structures that end up unused.
Energy-aware scheduling allows a system to provide energy model data
only up to a certain sd level (the so-called highest energy-aware
balancing sd level). A check in init_sched_energy() enforces that all
sd levels below this one do contain energy model data.
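Purely for illustration (not part of this patch), an arch could provide
energy functions only for the lower topology levels, e.g. with a
hypothetical table like the one below; cpu_core_energy() is a
placeholder name for a per-level sched_domain_energy_f callback. Here MC
would be the highest energy-aware balancing sd level; the reverse setup
(DIE with energy data but MC without) is what init_sched_energy()
reports as broken:

	/* Hypothetical example, not taken from this series verbatim. */
	static struct sched_domain_topology_level example_topology[] = {
		{ .mask = cpu_coregroup_mask, .energy = cpu_core_energy,
		  SD_INIT_NAME(MC) },
		{ .mask = cpu_cpu_mask, SD_INIT_NAME(DIE) }, /* no energy data */
		{ NULL, },
	};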
One complication is that the number of elements in the idle_state and
capacity_state arrays is not fixed and has to be retrieved in
__sdt_alloc() so that memory for the sched_group_energy structure and
the two arrays can be allocated in one chunk. The array pointers
(idle_states and cap_states) are initialized there to point to the
correct offsets inside that memory chunk.
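The pointer arithmetic in the __sdt_alloc() hunk below implies that
cap_states is the last member of struct sched_group_energy, so the chunk
can be read as the structure immediately followed by the two arrays. A
simplified sketch of that layout (illustration only, not the patch code
itself):

	/* One chunk: struct sched_group_energy + idle_state[] + capacity_state[] */
	sge = kzalloc_node(sizeof(struct sched_group_energy) +
			   nr_idle_states * sizeof(struct idle_state) +
			   nr_cap_states * sizeof(struct capacity_state),
			   GFP_KERNEL, cpu_to_node(j));
	if (sge) {
		/* the idle_state array starts right after the structure ... */
		sge->idle_states = (struct idle_state *)(sge + 1);
		/* ... and the capacity_state array follows it */
		sge->cap_states = (struct capacity_state *)
				  (sge->idle_states + nr_idle_states);
	}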
The new function init_sched_energy() initializes the sched_group_energy
structure and the two arrays if the sd topology level provides energy
information.
This patch has also been tested with the scheduler feature flag
FORCE_SD_OVERLAP enabled.
cc: Ingo Molnar <mingo@...hat.com>
cc: Peter Zijlstra <peterz@...radead.org>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@....com>
---
kernel/sched/core.c | 92 +++++++++++++++++++++++++++++++++++++++++++++++++++-
kernel/sched/sched.h | 33 +++++++++++++++++++
2 files changed, 124 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bceb3a8..ab42515 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5723,6 +5723,9 @@ static void free_sched_groups(struct sched_group *sg, int free_sgc)
if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
kfree(sg->sgc);
+ if (free_sgc && atomic_dec_and_test(&sg->sge->ref))
+ kfree(sg->sge);
+
kfree(sg);
sg = tmp;
} while (sg != first);
@@ -5740,6 +5743,7 @@ static void free_sched_domain(struct rcu_head *rcu)
free_sched_groups(sd->groups, 1);
} else if (atomic_dec_and_test(&sd->groups->ref)) {
kfree(sd->groups->sgc);
+ kfree(sd->groups->sge);
kfree(sd->groups);
}
kfree(sd);
@@ -5957,6 +5961,8 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
*/
sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
+ sg->sge = *per_cpu_ptr(sdd->sge, i);
+
/*
* Make sure the first group of this domain contains the
* canonical balance cpu. Otherwise the sched_domain iteration
@@ -5995,6 +6001,7 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
*sg = *per_cpu_ptr(sdd->sg, cpu);
(*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
+ (*sg)->sge = *per_cpu_ptr(sdd->sge, cpu);
}
return cpu;
@@ -6084,6 +6091,45 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
}
+static void init_sched_energy(int cpu, struct sched_domain *sd,
+ struct sched_domain_topology_level *tl)
+{
+ struct sched_group *sg = sd->groups;
+ struct sched_group_energy *energy = sg->sge;
+ sched_domain_energy_f fn = tl->energy;
+ struct cpumask *mask = sched_group_cpus(sg);
+
+ if (fn && sd->child && !sd->child->groups->sge) {
+ pr_err("BUG: EAS setup broken for CPU%d\n", cpu);
+#ifdef CONFIG_SCHED_DEBUG
+ pr_err(" energy data on %s but not on %s domain\n",
+ sd->name, sd->child->name);
+#endif
+ return;
+ }
+
+ if (cpu != group_balance_cpu(sg))
+ return;
+
+ if (!fn || !fn(cpu)) {
+ sg->sge = NULL;
+ return;
+ }
+
+ atomic_set(&sg->sge->ref, 1); /* for claim_allocations */
+
+
+ if (cpumask_weight(mask) > 1)
+ check_sched_energy_data(cpu, fn, mask);
+
+ energy->nr_idle_states = fn(cpu)->nr_idle_states;
+ memcpy(energy->idle_states, fn(cpu)->idle_states,
+ energy->nr_idle_states*sizeof(struct idle_state));
+ energy->nr_cap_states = fn(cpu)->nr_cap_states;
+ memcpy(energy->cap_states, fn(cpu)->cap_states,
+ energy->nr_cap_states*sizeof(struct capacity_state));
+}
+
/*
* Initializers for schedule domains
* Non-inlined to reduce accumulated stack pressure in build_sched_domains()
@@ -6174,6 +6220,9 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
*per_cpu_ptr(sdd->sgc, cpu) = NULL;
+
+ if (atomic_read(&(*per_cpu_ptr(sdd->sge, cpu))->ref))
+ *per_cpu_ptr(sdd->sge, cpu) = NULL;
}
#ifdef CONFIG_NUMA
@@ -6639,10 +6688,24 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
if (!sdd->sgc)
return -ENOMEM;
+ sdd->sge = alloc_percpu(struct sched_group_energy *);
+ if (!sdd->sge)
+ return -ENOMEM;
+
for_each_cpu(j, cpu_map) {
struct sched_domain *sd;
struct sched_group *sg;
struct sched_group_capacity *sgc;
+ struct sched_group_energy *sge;
+ sched_domain_energy_f fn = tl->energy;
+ unsigned int nr_idle_states = 0;
+ unsigned int nr_cap_states = 0;
+
+ if (fn && fn(j)) {
+ nr_idle_states = fn(j)->nr_idle_states;
+ nr_cap_states = fn(j)->nr_cap_states;
+ BUG_ON(!nr_idle_states || !nr_cap_states);
+ }
sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
@@ -6666,6 +6729,26 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
return -ENOMEM;
*per_cpu_ptr(sdd->sgc, j) = sgc;
+
+ sge = kzalloc_node(sizeof(struct sched_group_energy) +
+ nr_idle_states*sizeof(struct idle_state) +
+ nr_cap_states*sizeof(struct capacity_state),
+ GFP_KERNEL, cpu_to_node(j));
+
+ if (!sge)
+ return -ENOMEM;
+
+ sge->idle_states = (struct idle_state *)
+ ((void *)&sge->cap_states +
+ sizeof(sge->cap_states));
+
+ sge->cap_states = (struct capacity_state *)
+ ((void *)&sge->cap_states +
+ sizeof(sge->cap_states) +
+ nr_idle_states*
+ sizeof(struct idle_state));
+
+ *per_cpu_ptr(sdd->sge, j) = sge;
}
}
@@ -6694,6 +6777,8 @@ static void __sdt_free(const struct cpumask *cpu_map)
kfree(*per_cpu_ptr(sdd->sg, j));
if (sdd->sgc)
kfree(*per_cpu_ptr(sdd->sgc, j));
+ if (sdd->sge)
+ kfree(*per_cpu_ptr(sdd->sge, j));
}
free_percpu(sdd->sd);
sdd->sd = NULL;
@@ -6701,6 +6786,8 @@ static void __sdt_free(const struct cpumask *cpu_map)
sdd->sg = NULL;
free_percpu(sdd->sgc);
sdd->sgc = NULL;
+ free_percpu(sdd->sge);
+ sdd->sge = NULL;
}
}
@@ -6786,10 +6873,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
/* Calculate CPU capacity for physical packages and nodes */
for (i = nr_cpumask_bits-1; i >= 0; i--) {
+ struct sched_domain_topology_level *tl = sched_domain_topology;
+
if (!cpumask_test_cpu(i, cpu_map))
continue;
- for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
+ for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent, tl++) {
+ init_sched_energy(i, sd, tl);
claim_allocations(i, sd);
init_sched_groups_capacity(i, sd);
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index df20e23..ea32e9e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -890,6 +890,39 @@ static inline unsigned int group_first_cpu(struct sched_group *group)
extern int group_balance_cpu(struct sched_group *sg);
+/*
+ * Check that the per-cpu provided sd energy data is consistent for all cpus
+ * within the mask.
+ */
+static inline void check_sched_energy_data(int cpu, sched_domain_energy_f fn,
+ const struct cpumask *cpumask)
+{
+ struct cpumask mask;
+ int i;
+
+ cpumask_xor(&mask, cpumask, get_cpu_mask(cpu));
+
+ for_each_cpu(i, &mask) {
+ int y;
+
+ BUG_ON(fn(i)->nr_idle_states != fn(cpu)->nr_idle_states);
+
+ for (y = 0; y < (fn(i)->nr_idle_states); y++) {
+ BUG_ON(fn(i)->idle_states[y].power !=
+ fn(cpu)->idle_states[y].power);
+ }
+
+ BUG_ON(fn(i)->nr_cap_states != fn(cpu)->nr_cap_states);
+
+ for (y = 0; y < (fn(i)->nr_cap_states); y++) {
+ BUG_ON(fn(i)->cap_states[y].cap !=
+ fn(cpu)->cap_states[y].cap);
+ BUG_ON(fn(i)->cap_states[y].power !=
+ fn(cpu)->cap_states[y].power);
+ }
+ }
+}
+
#else
static inline void sched_ttwu_pending(void) { }
--
1.9.1