Message-Id: <1492717903-5195-5-git-send-email-lvenanci@redhat.com>
Date: Thu, 20 Apr 2017 16:51:43 -0300
From: Lauro Ramos Venancio <lvenanci@...hat.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: lwang@...hat.com, riel@...hat.com, Mike Galbraith <efault@....de>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...nel.org>, linux-kernel@...r.kernel.org,
Lauro Ramos Venancio <lvenanci@...hat.com>
Subject: [PATCH 4/4] sched/topology: the group balance cpu must be a cpu where the group is installed

An overlap sched group may not be installed on all the CPUs that compose
the group. Currently, the group balance CPU may be a CPU where the group
is not installed, causing two problems:

1) Two groups may have the same group balance CPU and, as a consequence,
   share the sched_group_capacity.

2) should_we_balance() in fair.c may never return true.

This patch changes the meaning of the group mask: it now marks all the
CPUs where the group is installed.

Signed-off-by: Lauro Ramos Venancio <lvenanci@...hat.com>
---
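
A minimal userspace model of the predicate change in build_group_mask(),
for reviewers. uint64_t stands in for struct cpumask, and every name below
is an illustrative stand-in, not the kernel API:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

/* Span of the group whose mask we are building (CPUs 0-3). */
static const uint64_t sg_span = 0x0f;

/*
 * Stand-in for sched_group_cpus(sibling->groups): the span of the group
 * installed on each CPU. The 0x0f group is only installed on CPUs 0 and 1;
 * CPUs 2 and 3 carry a group with a different span.
 */
static const uint64_t sibling_group_span[NR_CPUS] = {
	0x0f, 0x0f, 0x03, 0x03, 0, 0, 0, 0
};

static uint64_t build_mask(void)
{
	uint64_t mask = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (!(sg_span & (1ULL << i)))
			continue;
		/*
		 * New rule: include CPU i only if the group actually
		 * installed on i has exactly this span.
		 */
		if (sibling_group_span[i] != sg_span)
			continue;
		mask |= 1ULL << i;
	}
	return mask;
}

int main(void)
{
	/* Prints 0x3: only CPUs 0 and 1 have the group installed. */
	printf("group mask: %#llx\n", (unsigned long long)build_mask());
	return 0;
}

Under the old membership test, CPUs 2 and 3 above would also end up in the
mask even though the group installed there has a different span; the
equality test keeps them out, so cpumask_first() over the mask (what
group_balance_cpu() returns) always lands on a CPU where the group is
installed.
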
kernel/sched/topology.c | 40 ++++++++++++++++++++++++++++++++++------
1 file changed, 34 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index e77c93a..694e799 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -493,8 +493,10 @@ enum s_alloc {
};

/*
- * Build an iteration mask that can exclude certain CPUs from the upwards
- * domain traversal.
+ * An overlap sched group may not be installed on all the CPUs that compose the
+ * group. So build the mask, marking all the CPUs where the group is installed.
+ *
+ * This function can only be used when all the groups are already built.
*/
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
@@ -505,7 +507,11 @@ static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)

for_each_cpu(i, sg_span) {
sibling = *per_cpu_ptr(sdd->sd, i);
- if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+
+ if (!sibling->groups)
+ continue;
+
+ if (!cpumask_equal(sg_span, sched_group_cpus(sibling->groups)))
continue;

cpumask_set_cpu(i, sched_group_mask(sg));
@@ -523,6 +529,7 @@ int group_balance_cpu(struct sched_group *sg)

/*
* Find the group balance cpu when the group mask is not available yet.
+ * This function can only be used when all the groups are already built.
*/
static int find_group_balance_cpu(struct sched_domain *sd,
struct sched_group *sg)
@@ -534,7 +541,11 @@ static int find_group_balance_cpu(struct sched_domain *sd,

for_each_cpu(i, sg_span) {
sibling = *per_cpu_ptr(sdd->sd, i);
- if (cpumask_test_cpu(i, sched_domain_span(sibling)))
+
+ if (!sibling->groups)
+ continue;
+
+ if (cpumask_equal(sg_span, sched_group_cpus(sibling->groups)))
return i;
}

@@ -584,6 +595,17 @@ static void init_overlap_sched_group(struct sched_domain *sd,
sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
}

+static void init_overlap_sched_groups(struct sched_domain *sd)
+{
+ struct sched_group *sg = sd->groups;
+
+ do {
+ init_overlap_sched_group(sd, sg);
+
+ sg = sg->next;
+ } while (sg != sd->groups);
+}
+
static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
@@ -624,8 +646,6 @@ static void init_overlap_sched_group(struct sched_domain *sd,
sg_span = sched_group_cpus(sg);
cpumask_or(covered, covered, sg_span);

- init_overlap_sched_group(sd, sg);
-
if (!first)
first = sg;
if (last)
@@ -1482,6 +1502,14 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
}
}

+ /* Init overlap groups */
+ for_each_cpu(i, cpu_map) {
+ for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
+ if (sd->flags & SD_OVERLAP)
+ init_overlap_sched_groups(sd);
+ }
+ }
+
/* Calculate CPU capacity for physical packages and nodes */
for (i = nr_cpumask_bits-1; i >= 0; i--) {
if (!cpumask_test_cpu(i, cpu_map))
--
1.8.3.1
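
As a side note on the restructuring: the init_overlap_sched_group() call
moves out of build_overlap_sched_groups() into a second pass because
build_group_mask() now dereferences sibling->groups, which is only valid
once every CPU has its groups linked. The new init_overlap_sched_groups()
relies on the groups forming a ring (build_overlap_sched_groups() links
last->next = first). A toy model of that walk in plain C, with an
illustrative struct rather than the kernel's sched_group:

#include <stdio.h>

/* Toy stand-in for struct sched_group: the groups form a circular list. */
struct group {
	int id;
	struct group *next;
};

static void init_group(struct group *g)
{
	printf("init group %d\n", g->id);
}

/*
 * Same do/while shape as init_overlap_sched_groups() in the patch:
 * start at the head and stop once the walk wraps back around to it,
 * so every group is visited exactly once.
 */
static void init_groups(struct group *head)
{
	struct group *g = head;

	do {
		init_group(g);
		g = g->next;
	} while (g != head);
}

int main(void)
{
	struct group a = { 0, NULL }, b = { 1, NULL }, c = { 2, NULL };

	a.next = &b;
	b.next = &c;
	c.next = &a;
	init_groups(&a);	/* prints groups 0, 1 and 2 once each */
	return 0;
}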