Message-ID: <lsq.1507553064.798991751@decadent.org.uk>
Date: Mon, 09 Oct 2017 13:44:24 +0100
From: Ben Hutchings <ben@...adent.org.uk>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
CC: akpm@...ux-foundation.org,
"Linus Torvalds" <torvalds@...ux-foundation.org>,
"Ingo Molnar" <mingo@...nel.org>, "Mike Galbraith" <efault@....de>,
"Thomas Gleixner" <tglx@...utronix.de>,
"Peter Zijlstra" <peterz@...radead.org>
Subject: [PATCH 3.16 006/192] sched/topology: Fix overlapping
sched_group_capacity
3.16.49-rc1 review patch. If anyone has any objections, please let me know.
------------------
From: Peter Zijlstra <peterz@...radead.org>
commit 1676330ecfa840113a37b25a49afda068380d19c upstream.
When building the overlapping groups we need to attach a consistent
sched_group_capacity structure. That is, all 'identical' sched_groups
should have the _same_ sched_group_capacity.
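For illustration, a minimal userspace sketch of that invariant (not
kernel code: the structs are simplified stand-ins, with 1024 per CPU
mirroring the kernel's SCHED_CAPACITY_SCALE) -- every builder of an
'identical' group must end up pointing at one shared, reference-counted
capacity object:

#include <assert.h>
#include <stdio.h>

struct sgc {			/* stand-in for struct sched_group_capacity */
	int ref;		/* the kernel uses atomic_t */
	unsigned long capacity;
};

struct group {			/* stand-in for struct sched_group */
	unsigned long span;	/* CPU bitmask, one bit per CPU */
	struct sgc *sgc;
};

/* one SGC per CPU, like the kernel's per-CPU sd_data::sgc allocation */
static struct sgc per_cpu_sgc[8];

/* attach the SGC that belongs to @cpu; the first attacher initializes it */
static void attach_sgc(struct group *g, int cpu)
{
	g->sgc = &per_cpu_sgc[cpu];
	if (++g->sgc->ref == 1)
		g->sgc->capacity = 1024 * __builtin_popcountl(g->span);
}

int main(void)
{
	/* CPU0's and CPU1's builds of the "same" group spanning CPUs {0,4} */
	struct group a = { .span = 0x11 }, b = { .span = 0x11 };

	attach_sgc(&a, 0);
	attach_sgc(&b, 0);	/* same balance CPU => same shared SGC */
	assert(a.sgc == b.sgc && a.sgc->ref == 2);
	printf("cap=%lu\n", a.sgc->capacity);	/* 2048, as in the logs below */
	return 0;
}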
This can (once again) be demonstrated with a topology like:
  node   0   1   2   3
    0:  10  20  30  20
    1:  20  10  20  30
    2:  30  20  10  20
    3:  20  30  20  10
But we need at least 2 CPUs per node for this to show up; after all,
if there is only one CPU per node, our CPU @i is by definition the
unique CPU that reaches this domain (aka the balance-cpu).
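As a toy illustration of why (plain C, with unsigned long bitmasks
standing in for cpumask_t): with one CPU per node a group's span is a
singleton, so there is only one candidate owner for its SGC; with two
CPUs per node there are two candidates, and without a fixed rule
different builders can pick different owners:

#include <stdio.h>

int main(void)
{
	unsigned long one_per_node = 0x04;	/* node-2 group spans only CPU 2 */
	unsigned long two_per_node = 0x11;	/* node-0 group spans CPUs {0,4} */

	printf("candidate SGC owners: %d vs %d\n",
	       __builtin_popcountl(one_per_node),
	       __builtin_popcountl(two_per_node));	/* prints: 1 vs 2 */
	return 0;
}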
Given the above NUMA topo and 2 CPUs per node:
  [] CPU0 attaching sched-domain(s):
  []  domain-0: span=0,4 level=DIE
  []   groups: 0:{ span=0 }, 4:{ span=4 }
  []  domain-1: span=0-1,3-5,7 level=NUMA
  []   groups: 0:{ span=0,4 mask=0,4 cap=2048 }, 1:{ span=1,5 mask=1,5 cap=2048 }, 3:{ span=3,7 mask=3,7 cap=2048 }
  []  domain-2: span=0-7 level=NUMA
  []   groups: 0:{ span=0-1,3-5,7 mask=0,4 cap=6144 }, 2:{ span=1-3,5-7 mask=2,6 cap=6144 }
  [] CPU1 attaching sched-domain(s):
  []  domain-0: span=1,5 level=DIE
  []   groups: 1:{ span=1 }, 5:{ span=5 }
  []  domain-1: span=0-2,4-6 level=NUMA
  []   groups: 1:{ span=1,5 mask=1,5 cap=2048 }, 2:{ span=2,6 mask=2,6 cap=2048 }, 4:{ span=0,4 mask=0,4 cap=2048 }
  []  domain-2: span=0-7 level=NUMA
  []   groups: 1:{ span=0-2,4-6 mask=1,5 cap=6144 }, 3:{ span=0,2-4,6-7 mask=3,7 cap=6144 }
Observe how CPU0-domain1-group0 and CPU1-domain1-group4 are the
'same' but have a different id (0 vs 4).
To fix this, use the group balance CPU to select the SGC. This means
we have to compute the full mask for each CPU and need a second
temporary mask in which to build the group mask (it otherwise lives in
the SGC).
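A sketch of the selection rule this adds, again with unsigned long
bitmasks standing in for cpumask_t: rebuild the group mask into a
temporary (sched_domains_tmpmask2 in the patch below) and take the
first CPU present in both the group's span and that mask, mirroring
the cpumask_first_and() call in init_overlap_sched_group():

#include <stdio.h>

/* analogue of cpumask_first_and(): lowest bit set in both masks, else -1 */
static int first_and(unsigned long a, unsigned long b)
{
	unsigned long both = a & b;
	return both ? __builtin_ctzl(both) : -1;
}

int main(void)
{
	unsigned long sg_span = 0x11;	/* the group spans CPUs {0,4} */
	unsigned long mask    = 0x11;	/* from build_group_mask(): CPUs {0,4} */

	/*
	 * Every CPU that builds this group now resolves to balance CPU 0
	 * and hence to CPU 0's SGC; before the fix, CPU1's build used the
	 * iteration CPU instead and ended up on CPU 4's SGC (id 4 above).
	 */
	printf("balance cpu = %d\n", first_and(sg_span, mask));
	return 0;
}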
The fixed topology looks like:
  [] CPU0 attaching sched-domain(s):
  []  domain-0: span=0,4 level=DIE
  []   groups: 0:{ span=0 }, 4:{ span=4 }
  []  domain-1: span=0-1,3-5,7 level=NUMA
  []   groups: 0:{ span=0,4 mask=0,4 cap=2048 }, 1:{ span=1,5 mask=1,5 cap=2048 }, 3:{ span=3,7 mask=3,7 cap=2048 }
  []  domain-2: span=0-7 level=NUMA
  []   groups: 0:{ span=0-1,3-5,7 mask=0,4 cap=6144 }, 2:{ span=1-3,5-7 mask=2,6 cap=6144 }
  [] CPU1 attaching sched-domain(s):
  []  domain-0: span=1,5 level=DIE
  []   groups: 1:{ span=1 }, 5:{ span=5 }
  []  domain-1: span=0-2,4-6 level=NUMA
  []   groups: 1:{ span=1,5 mask=1,5 cap=2048 }, 2:{ span=2,6 mask=2,6 cap=2048 }, 0:{ span=0,4 mask=0,4 cap=2048 }
  []  domain-2: span=0-7 level=NUMA
  []   groups: 1:{ span=0-2,4-6 mask=1,5 cap=6144 }, 3:{ span=0,2-4,6-7 mask=3,7 cap=6144 }
Debugged-by: Lauro Ramos Venancio <lvenanci@...hat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Mike Galbraith <efault@....de>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: linux-kernel@...r.kernel.org
Fixes: e3589f6c81e4 ("sched: Allow for overlapping sched_domain spans")
Signed-off-by: Ingo Molnar <mingo@...nel.org>
[bwh: Backported to 3.16: adjust filename, context]
Signed-off-by: Ben Hutchings <ben@...adent.org.uk>
---
kernel/sched/core.c | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5298,6 +5298,7 @@ early_initcall(migration_init);
#ifdef CONFIG_SMP
static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
+cpumask_var_t sched_domains_tmpmask2;
#ifdef CONFIG_SCHED_DEBUG
@@ -5778,13 +5779,16 @@ enum s_alloc {
* Only CPUs that can arrive at this group should be considered to continue
* balancing.
*/
-static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
+static void
+build_group_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
{
const struct cpumask *span = sched_domain_span(sd);
struct sd_data *sdd = sd->private;
struct sched_domain *sibling;
int i;
+ cpumask_clear(mask);
+
for_each_cpu(i, span) {
sibling = *per_cpu_ptr(sdd->sd, i);
@@ -5800,11 +5804,11 @@ static void build_group_mask(struct sche
if (!cpumask_equal(span, sched_domain_span(sibling->child)))
continue;
- cpumask_set_cpu(i, sched_group_mask(sg));
+ cpumask_set_cpu(i, mask);
}
/* We must not have empty masks here */
- WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
+ WARN_ON_ONCE(cpumask_empty(mask));
}
/*
@@ -5838,14 +5842,19 @@ build_group_from_child_sched_domain(stru
}
static void init_overlap_sched_group(struct sched_domain *sd,
- struct sched_group *sg, int cpu)
+ struct sched_group *sg)
{
+ struct cpumask *mask = sched_domains_tmpmask2;
struct sd_data *sdd = sd->private;
struct cpumask *sg_span;
+ int cpu;
+
+ build_group_mask(sd, sg, mask);
+ cpu = cpumask_first_and(sched_group_cpus(sg), mask);
sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
if (atomic_inc_return(&sg->sgc->ref) == 1)
- build_group_mask(sd, sg);
+ cpumask_copy(sched_group_mask(sg), mask);
/*
* Initialize sgc->capacity such that even if we mess up the
@@ -5888,7 +5897,7 @@ build_overlap_sched_groups(struct sched_
sg_span = sched_group_cpus(sg);
cpumask_or(covered, covered, sg_span);
- init_overlap_sched_group(sd, sg, i);
+ init_overlap_sched_group(sd, sg);
/*
* Make sure the first group of this domain contains the
@@ -7147,6 +7156,7 @@ void __init sched_init(void)
#ifdef CONFIG_SMP
zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
+ zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_NOWAIT);
/* May be allocated at isolcpus cmdline parse time */
if (cpu_isolated_map == NULL)
zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);