Message-Id: <20200721113814.32284-10-srikar@linux.vnet.ibm.com>
Date: Tue, 21 Jul 2020 17:08:13 +0530
From: Srikar Dronamraju <srikar@...ux.vnet.ibm.com>
To: Michael Ellerman <michaele@....ibm.com>
Cc: Srikar Dronamraju <srikar@...ux.vnet.ibm.com>,
linuxppc-dev <linuxppc-dev@...ts.ozlabs.org>,
LKML <linux-kernel@...r.kernel.org>,
Ingo Molnar <mingo@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Valentin Schneider <valentin.schneider@....com>,
Nick Piggin <npiggin@....ibm.com>,
Oliver OHalloran <oliveroh@....ibm.com>,
Nathan Lynch <nathanl@...ux.ibm.com>,
Michael Neuling <mikey@...ux.ibm.com>,
Anton Blanchard <anton@....ibm.com>,
Gautham R Shenoy <ego@...ux.vnet.ibm.com>,
Vaidyanathan Srinivasan <svaidy@...ux.ibm.com>,
Jordan Niethe <jniethe5@...il.com>
Subject: [PATCH v2 09/10] Powerpc/smp: Create coregroup domain
Add percpu coregroup maps and masks to create the coregroup domain.
If a coregroup doesn't exist, the coregroup domain will be degenerated
in favour of the SMT/CACHE domain.
Cc: linuxppc-dev <linuxppc-dev@...ts.ozlabs.org>
Cc: LKML <linux-kernel@...r.kernel.org>
Cc: Michael Ellerman <michaele@....ibm.com>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Valentin Schneider <valentin.schneider@....com>
Cc: Nick Piggin <npiggin@....ibm.com>
Cc: Oliver OHalloran <oliveroh@....ibm.com>
Cc: Nathan Lynch <nathanl@...ux.ibm.com>
Cc: Michael Neuling <mikey@...ux.ibm.com>
Cc: Anton Blanchard <anton@....ibm.com>
Cc: Gautham R Shenoy <ego@...ux.vnet.ibm.com>
Cc: Vaidyanathan Srinivasan <svaidy@...ux.ibm.com>
Cc: Jordan Niethe <jniethe5@...il.com>
Signed-off-by: Srikar Dronamraju <srikar@...ux.vnet.ibm.com>
---
Changelog v1 -> v2:
Powerpc/smp: Create coregroup domain
	Moved coregroup topology fixup to fixup_topology (Gautham)
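
Note for readers (not part of the patch): below is a minimal user-space
sketch of the degeneration idea mentioned in the commit message, assuming
the usual rule that a topology level whose span is identical to an adjacent
level adds no scheduling information and can be dropped. The names
toy_level and spans_identical are made up for illustration and are not
kernel APIs; in the kernel the scheduler's own degeneration logic does this
once fixup_topology() points the MC mask at cpu_bigcore_mask.

/* Illustrative user-space model only, not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_level {
	const char *name;
	uint64_t mask;		/* bit i set => CPU i is in this level's span */
};

static bool spans_identical(const struct toy_level *a, const struct toy_level *b)
{
	return a->mask == b->mask;
}

int main(void)
{
	/* Without coregroup support, MC ends up with the same span as BIGCORE. */
	struct toy_level levels[] = {
		{ "SMT",     0x0000000f },
		{ "BIGCORE", 0x000000ff },
		{ "MC",      0x000000ff },	/* same span as BIGCORE */
		{ "DIE",     0xffffffff },
	};
	int nr = sizeof(levels) / sizeof(levels[0]);

	for (int i = 0; i < nr; i++) {
		if (i && spans_identical(&levels[i], &levels[i - 1]))
			printf("%s: degenerate (same span as %s)\n",
			       levels[i].name, levels[i - 1].name);
		else
			printf("%s: kept\n", levels[i].name);
	}
	return 0;
}
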
arch/powerpc/include/asm/topology.h | 10 ++++++++
arch/powerpc/kernel/smp.c | 38 +++++++++++++++++++++++++++++
arch/powerpc/mm/numa.c | 5 ++++
3 files changed, 53 insertions(+)
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index f0b6300e7dd3..6609174918ab 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -88,12 +88,22 @@ static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
extern int find_and_online_cpu_nid(int cpu);
+extern int cpu_to_coregroup_id(int cpu);
#else
static inline int find_and_online_cpu_nid(int cpu)
{
return 0;
}
+static inline int cpu_to_coregroup_id(int cpu)
+{
+#ifdef CONFIG_SMP
+	return cpu_to_core_id(cpu);
+#else
+	return 0;
+#endif
+}
+
#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
#include <asm-generic/topology.h>
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 97b762a1944a..a7e1366b7fd3 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -80,6 +80,7 @@ DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
@@ -91,6 +92,7 @@ enum {
smt_idx,
#endif
bigcore_idx,
+	mc_idx,
die_idx,
};
@@ -869,6 +871,21 @@ static const struct cpumask *smallcore_smt_mask(int cpu)
}
#endif
+static struct cpumask *cpu_coregroup_mask(int cpu)
+{
+	return per_cpu(cpu_coregroup_map, cpu);
+}
+
+static bool has_coregroup_support(void)
+{
+	return coregroup_enabled;
+}
+
+static const struct cpumask *cpu_mc_mask(int cpu)
+{
+	return cpu_coregroup_mask(cpu);
+}
+
static const struct cpumask *cpu_bigcore_mask(int cpu)
{
return per_cpu(cpu_sibling_map, cpu);
@@ -879,6 +896,7 @@ static struct sched_domain_topology_level powerpc_topology[] = {
{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
{ cpu_bigcore_mask, SD_INIT_NAME(BIGCORE) },
+	{ cpu_mc_mask, SD_INIT_NAME(MC) },
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
{ NULL, },
};
@@ -927,6 +945,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
GFP_KERNEL, node);
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
GFP_KERNEL, node);
+		if (has_coregroup_support())
+			zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
+						GFP_KERNEL, node);
+
#ifdef CONFIG_NEED_MULTIPLE_NODES
/*
* numa_node_id() works after this.
@@ -944,6 +966,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
+	if (has_coregroup_support())
+		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
+
init_big_cores();
if (has_big_cores) {
cpumask_set_cpu(boot_cpuid,
@@ -1235,6 +1260,8 @@ static void remove_cpu_from_masks(int cpu)
set_cpus_unrelated(cpu, i, cpu_sibling_mask);
if (has_big_cores)
set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
+		if (has_coregroup_support())
+			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
}
}
#endif
@@ -1295,6 +1322,14 @@ static void add_cpu_to_masks(int cpu)
add_cpu_to_smallcore_masks(cpu);
update_mask_by_l2(cpu, cpu_l2_cache_mask);
+	if (has_coregroup_support()) {
+		cpumask_set_cpu(cpu, cpu_coregroup_mask(cpu));
+		for_each_cpu(i, cpu_online_mask) {
+			if (cpu_to_coregroup_id(cpu) == cpu_to_coregroup_id(i))
+				set_cpus_related(cpu, i, cpu_coregroup_mask);
+		}
+	}
+
if (pkg_id == -1) {
struct cpumask *(*mask)(int) = cpu_sibling_mask;
@@ -1386,6 +1421,9 @@ int setup_profiling_timer(unsigned int multiplier)
static void fixup_topology(void)
{
+	if (!has_coregroup_support())
+		powerpc_topology[mc_idx].mask = cpu_bigcore_mask;
+
if (shared_caches) {
pr_info("Using shared cache scheduler topology\n");
powerpc_topology[bigcore_idx].mask = shared_cache_mask;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 3248160c0327..ef8aa580da21 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1216,6 +1216,11 @@ int find_and_online_cpu_nid(int cpu)
return new_nid;
}
+int cpu_to_coregroup_id(int cpu)
+{
+	return cpu_to_core_id(cpu);
+}
+
static int topology_update_init(void)
{
topology_inited = 1;
--
2.17.1