Message-Id: <20200727051805.16728-7-srikar@linux.vnet.ibm.com>
Date: Mon, 27 Jul 2020 10:48:01 +0530
From: Srikar Dronamraju <srikar@...ux.vnet.ibm.com>
To: Michael Ellerman <mpe@...erman.id.au>
Cc: linuxppc-dev <linuxppc-dev@...ts.ozlabs.org>,
Srikar Dronamraju <srikar@...ux.vnet.ibm.com>,
LKML <linux-kernel@...r.kernel.org>,
Nicholas Piggin <npiggin@...il.com>,
Anton Blanchard <anton@...abs.org>,
"Oliver O'Halloran" <oohall@...il.com>,
Nathan Lynch <nathanl@...ux.ibm.com>,
Michael Neuling <mikey@...ling.org>,
Gautham R Shenoy <ego@...ux.vnet.ibm.com>,
Ingo Molnar <mingo@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Valentin Schneider <valentin.schneider@....com>,
Jordan Niethe <jniethe5@...il.com>
Subject: [PATCH v4 06/10] powerpc/smp: Generalize 2nd sched domain
Currently "CACHE" domain happens to be the 2nd sched domain as per
powerpc_topology. This domain will collapse if cpumask of l2-cache is
same as SMT domain. However we could generalize this domain such that it
could mean either be a "CACHE" domain or a "BIGCORE" domain.
While setting up the "CACHE" domain, check if shared_cache is already
set.
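
To illustrate, here is a standalone userspace sketch (not kernel code;
the struct, the strings and the main() driver below are made up for
this example) of how the 2nd sched domain level is picked after this
patch. It mirrors the new powerpc_topology[] ordering (smt_idx = 0,
bigcore_idx = 1 with CONFIG_SCHED_SMT) and the rewrite done in
fixup_topology():

#include <stdbool.h>
#include <stdio.h>

struct topo_level {
	const char *mask_fn;	/* which cpumask accessor the level uses */
	const char *name;	/* SD_INIT_NAME() equivalent */
};

/* Mirrors powerpc_topology[] after this patch: SMT, BIGCORE, DIE. */
static struct topo_level topo[] = {
	{ "cpu_smt_mask",     "SMT"     },
	{ "cpu_bigcore_mask", "BIGCORE" },
	{ "cpu_cpu_mask",     "DIE"     },
};

/* Mirrors fixup_topology(): rewrite levels for the detected hardware. */
static void fixup_topology(bool has_big_cores, bool shared_caches)
{
	if (has_big_cores)
		topo[0].mask_fn = "smallcore_smt_mask";

	if (shared_caches) {
		/* The generalized 2nd domain becomes "CACHE". */
		topo[1].mask_fn = "shared_cache_mask";
		topo[1].name = "CACHE";
	}
}

int main(void)
{
	fixup_topology(true, true);	/* e.g. a big-core, shared-L2 box */
	for (int i = 0; i < 3; i++)
		printf("%-8s <- %s\n", topo[i].name, topo[i].mask_fn);
	return 0;
}

With shared caches detected, the 2nd level is rewritten to "CACHE";
otherwise it stays "BIGCORE", so the slot carries useful scheduling
information either way.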
Cc: linuxppc-dev <linuxppc-dev@...ts.ozlabs.org>
Cc: LKML <linux-kernel@...r.kernel.org>
Cc: Michael Ellerman <mpe@...erman.id.au>
Cc: Nicholas Piggin <npiggin@...il.com>
Cc: Anton Blanchard <anton@...abs.org>
Cc: Oliver O'Halloran <oohall@...il.com>
Cc: Nathan Lynch <nathanl@...ux.ibm.com>
Cc: Michael Neuling <mikey@...ling.org>
Cc: Gautham R Shenoy <ego@...ux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Valentin Schneider <valentin.schneider@....com>
Cc: Jordan Niethe <jniethe5@...il.com>
Signed-off-by: Srikar Dronamraju <srikar@...ux.vnet.ibm.com>
---
Changelog v1 -> v2:
Moved shared_cache topology fixup to fixup_topology (Gautham)

 arch/powerpc/kernel/smp.c | 49 ++++++++++++++++++++++++++++-----------
 1 file changed, 35 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d997c7411664..3c5ccf6d2b1c 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -85,6 +85,14 @@ EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);
+enum {
+#ifdef CONFIG_SCHED_SMT
+ smt_idx,
+#endif
+ bigcore_idx,
+ die_idx,
+};
+
#define MAX_THREAD_LIST_SIZE 8
#define THREAD_GROUP_SHARE_L1 1
struct thread_groups {
@@ -851,13 +859,7 @@ static int powerpc_shared_cache_flags(void)
*/
static const struct cpumask *shared_cache_mask(int cpu)
{
- if (shared_caches)
- return cpu_l2_cache_mask(cpu);
-
- if (has_big_cores)
- return cpu_smallcore_mask(cpu);
-
- return per_cpu(cpu_sibling_map, cpu);
+ return per_cpu(cpu_l2_cache_map, cpu);
}
#ifdef CONFIG_SCHED_SMT
@@ -867,11 +869,16 @@ static const struct cpumask *smallcore_smt_mask(int cpu)
}
#endif
+static const struct cpumask *cpu_bigcore_mask(int cpu)
+{
+ return per_cpu(cpu_sibling_map, cpu);
+}
+
static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
- { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
+ { cpu_bigcore_mask, SD_INIT_NAME(BIGCORE) },
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
{ NULL, },
};
@@ -1311,7 +1318,6 @@ static void add_cpu_to_masks(int cpu)
void start_secondary(void *unused)
{
unsigned int cpu = smp_processor_id();
- struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
mmgrab(&init_mm);
current->active_mm = &init_mm;
@@ -1337,14 +1343,20 @@ void start_secondary(void *unused)
/* Update topology CPU masks */
add_cpu_to_masks(cpu);
- if (has_big_cores)
- sibling_mask = cpu_smallcore_mask;
/*
* Check for any shared caches. Note that this must be done on a
* per-core basis because one core in the pair might be disabled.
*/
- if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
- shared_caches = true;
+ if (!shared_caches) {
+ struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
+ struct cpumask *mask = cpu_l2_cache_mask(cpu);
+
+ if (has_big_cores)
+ sibling_mask = cpu_smallcore_mask;
+
+ if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
+ shared_caches = true;
+ }
set_numa_node(numa_cpu_lookup_table[cpu]);
set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
@@ -1375,9 +1387,17 @@ static void fixup_topology(void)
#ifdef CONFIG_SCHED_SMT
if (has_big_cores) {
pr_info("Big cores detected but using small core scheduling\n");
- powerpc_topology[0].mask = smallcore_smt_mask;
+ powerpc_topology[smt_idx].mask = smallcore_smt_mask;
}
#endif
+ if (shared_caches) {
+ pr_info("Using shared cache scheduler topology\n");
+ powerpc_topology[bigcore_idx].mask = shared_cache_mask;
+ powerpc_topology[bigcore_idx].sd_flags = powerpc_shared_cache_flags;
+#ifdef CONFIG_SCHED_DEBUG
+ powerpc_topology[bigcore_idx].name = "CACHE";
+#endif
+ }
}
void __init smp_cpus_done(unsigned int max_cpus)
--
2.17.1