Message-ID: <20200724070629.GA21415@in.ibm.com>
Date: Fri, 24 Jul 2020 12:36:29 +0530
From: Gautham R Shenoy <ego@...ux.vnet.ibm.com>
To: Srikar Dronamraju <srikar@...ux.vnet.ibm.com>
Cc: Michael Ellerman <mpe@...erman.id.au>,
linuxppc-dev <linuxppc-dev@...ts.ozlabs.org>,
LKML <linux-kernel@...r.kernel.org>,
Nicholas Piggin <npiggin@...il.com>,
Anton Blanchard <anton@...abs.org>,
"Oliver O'Halloran" <oohall@...il.com>,
Nathan Lynch <nathanl@...ux.ibm.com>,
Michael Neuling <mikey@...ling.org>,
Gautham R Shenoy <ego@...ux.vnet.ibm.com>,
Ingo Molnar <mingo@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Valentin Schneider <valentin.schneider@....com>,
Jordan Niethe <jniethe5@...il.com>
Subject: Re: [PATCH v3 02/10] powerpc/smp: Merge Power9 topology with Power topology
On Thu, Jul 23, 2020 at 02:21:08PM +0530, Srikar Dronamraju wrote:
> A new sched_domain_topology_level was added just for Power9. However the
> same can be achieved by merging powerpc_topology with power9_topology,
> which makes the code simpler, especially when adding a new sched
> domain.
>
> Cc: linuxppc-dev <linuxppc-dev@...ts.ozlabs.org>
> Cc: LKML <linux-kernel@...r.kernel.org>
> Cc: Michael Ellerman <mpe@...erman.id.au>
> Cc: Nicholas Piggin <npiggin@...il.com>
> Cc: Anton Blanchard <anton@...abs.org>
> Cc: Oliver O'Halloran <oohall@...il.com>
> Cc: Nathan Lynch <nathanl@...ux.ibm.com>
> Cc: Michael Neuling <mikey@...ling.org>
> Cc: Gautham R Shenoy <ego@...ux.vnet.ibm.com>
> Cc: Ingo Molnar <mingo@...nel.org>
> Cc: Peter Zijlstra <peterz@...radead.org>
> Cc: Valentin Schneider <valentin.schneider@....com>
> Cc: Jordan Niethe <jniethe5@...il.com>
> Signed-off-by: Srikar Dronamraju <srikar@...ux.vnet.ibm.com>
LGTM.
Reviewed-by: Gautham R. Shenoy <ego@...ux.vnet.ibm.com>
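
For reference, the merged table in arch/powerpc/kernel/smp.c after this
patch should end up looking roughly like the sketch below. The SMT and DIE
entries and the rename are visible in the hunks; the CACHE entry is my
reconstruction, assuming it is carried over unchanged from the old
power9_topology:

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	/* SMT siblings; switched to smallcore_smt_mask in smp_cpus_done()
	 * on big-core systems */
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	/* L2 span on shared-cache parts; falls back to the small-core or
	 * SMT sibling span otherwise (see shared_cache_mask() below) */
	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

With that fallback in shared_cache_mask(), the CACHE level should simply
degenerate on machines without shared caches, which is what makes the
unconditional set_sched_topology(powerpc_topology) safe.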
> ---
> Changelog v1 -> v2:
> Replaced a reference to cpu_smt_mask with per_cpu(cpu_sibling_map, cpu)
> since cpu_smt_mask is only defined under CONFIG_SCHED_SMT
>
> arch/powerpc/kernel/smp.c | 33 ++++++++++-----------------------
> 1 file changed, 10 insertions(+), 23 deletions(-)
>
> diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
> index edf94ca64eea..283a04e54f52 100644
> --- a/arch/powerpc/kernel/smp.c
> +++ b/arch/powerpc/kernel/smp.c
> @@ -1313,7 +1313,7 @@ int setup_profiling_timer(unsigned int multiplier)
> }
>
> #ifdef CONFIG_SCHED_SMT
> -/* cpumask of CPUs with asymetric SMT dependancy */
> +/* cpumask of CPUs with asymmetric SMT dependency */
> static int powerpc_smt_flags(void)
> {
> int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
> @@ -1326,14 +1326,6 @@ static int powerpc_smt_flags(void)
> }
> #endif
>
> -static struct sched_domain_topology_level powerpc_topology[] = {
> -#ifdef CONFIG_SCHED_SMT
> - { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
> -#endif
> - { cpu_cpu_mask, SD_INIT_NAME(DIE) },
> - { NULL, },
> -};
> -
> /*
> * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
> * This topology makes it *much* cheaper to migrate tasks between adjacent cores
> @@ -1351,7 +1343,13 @@ static int powerpc_shared_cache_flags(void)
> */
> static const struct cpumask *shared_cache_mask(int cpu)
> {
> - return cpu_l2_cache_mask(cpu);
> + if (shared_caches)
> + return cpu_l2_cache_mask(cpu);
> +
> + if (has_big_cores)
> + return cpu_smallcore_mask(cpu);
> +
> + return per_cpu(cpu_sibling_map, cpu);
> }
>
> #ifdef CONFIG_SCHED_SMT
> @@ -1361,7 +1359,7 @@ static const struct cpumask *smallcore_smt_mask(int cpu)
> }
> #endif
>
> -static struct sched_domain_topology_level power9_topology[] = {
> +static struct sched_domain_topology_level powerpc_topology[] = {
> #ifdef CONFIG_SCHED_SMT
> { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
> #endif
> @@ -1386,21 +1384,10 @@ void __init smp_cpus_done(unsigned int max_cpus)
> #ifdef CONFIG_SCHED_SMT
> if (has_big_cores) {
> pr_info("Big cores detected but using small core scheduling\n");
> - power9_topology[0].mask = smallcore_smt_mask;
> powerpc_topology[0].mask = smallcore_smt_mask;
> }
> #endif
> - /*
> - * If any CPU detects that it's sharing a cache with another CPU then
> - * use the deeper topology that is aware of this sharing.
> - */
> - if (shared_caches) {
> - pr_info("Using shared cache scheduler topology\n");
> - set_sched_topology(power9_topology);
> - } else {
> - pr_info("Using standard scheduler topology\n");
> - set_sched_topology(powerpc_topology);
> - }
> + set_sched_topology(powerpc_topology);
> }
>
> #ifdef CONFIG_HOTPLUG_CPU
> --
> 2.18.2
>