Message-ID: <531EE087.7050707@linux.vnet.ibm.com>
Date:	Tue, 11 Mar 2014 15:38:07 +0530
From:	Preeti U Murthy <preeti@...ux.vnet.ibm.com>
To:	Vincent Guittot <vincent.guittot@...aro.org>
CC:	peterz@...radead.org, mingo@...nel.org,
	linux-kernel@...r.kernel.org, tony.luck@...el.com,
	fenghua.yu@...el.com, schwidefsky@...ibm.com,
	james.hogan@...tec.com, cmetcalf@...era.com,
	benh@...nel.crashing.org, linux@....linux.org.uk,
	linux-arm-kernel@...ts.infradead.org, dietmar.eggemann@....com,
	linaro-kernel@...ts.linaro.org
Subject: Re: [RFC 4/6] sched: powerpc: create a dedicated topology table

Hi Vincent,

On 03/05/2014 12:48 PM, Vincent Guittot wrote:
> Create a dedicated topology table for handling the asymmetric packing feature.
> The current proposal creates a new level which describes which groups of CPUs
> take advantage of SD_ASYM_PACKING. This otherwise redundant level will be
> removed during the build of the sched_domain topology.
> 
> Another solution would be to set SD_ASYM_PACKING in the sd_flags of the SMT
> level during the boot sequence, before the sched_domain topology is built.

Is the below what you mean by the other solution? If so, I would strongly
recommend this approach rather than adding another level to the topology
table to represent the asymmetric behaviour.

+static struct sched_domain_topology_level powerpc_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+	{ cpu_smt_mask, SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES
+		| arch_sd_sibling_asym_packing(), SD_INIT_NAME(SMT) },
+#endif
+	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
+	{ NULL, },
+};
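
(If calling arch_sd_sibling_asym_packing() from a static initializer turns
out to be awkward, the boot-time variant your changelog mentions could look
roughly like the sketch below. This is only a hypothetical illustration: it
assumes the sd_flags field named in the changelog and that the SMT entry is
powerpc_topology[0].)

/* Hypothetical sketch (not part of the patch): OR the asym-packing flag
 * into the SMT entry's sd_flags at boot, before set_sched_topology()
 * hands the table to the scheduler.
 */
static void __init powerpc_apply_asym_smt(void)
{
#ifdef CONFIG_SCHED_SMT
	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		powerpc_topology[0].sd_flags |= SD_ASYM_PACKING;
	}
#endif
}

Called from smp_cpus_done() just before set_sched_topology(), this would
keep the CPU_FTR_ASYM_SMT check in arch code without adding the extra ASMT
level.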

Regards
Preeti U Murthy
> 
> Signed-off-by: Vincent Guittot <vincent.guittot@...aro.org>
> ---
>  arch/powerpc/kernel/smp.c |   35 +++++++++++++++++++++++++++--------
>  kernel/sched/core.c       |    6 ------
>  2 files changed, 27 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
> index ac2621a..75da054 100644
> --- a/arch/powerpc/kernel/smp.c
> +++ b/arch/powerpc/kernel/smp.c
> @@ -755,6 +755,32 @@ int setup_profiling_timer(unsigned int multiplier)
>  	return 0;
>  }
> 
> +#ifdef CONFIG_SCHED_SMT
> +/* cpumask of CPUs with asymmetric SMT dependency */
> +static const struct cpumask *cpu_asmt_mask(int cpu)
> +{
> +	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
> +		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
> +		return topology_thread_cpumask(cpu);
> +	}
> +	return cpumask_of(cpu);
> +}
> +#endif
> +
> +static struct sched_domain_topology_level powerpc_topology[] = {
> +#ifdef CONFIG_SCHED_SMT
> +	{ cpu_asmt_mask, SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING, SD_INIT_NAME(ASMT) },
> +	{ cpu_smt_mask, SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES, SD_INIT_NAME(SMT) },
> +#endif
> +	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
> +	{ NULL, },
> +};
> +
> +static void __init set_sched_topology(void)
> +{
> +	sched_domain_topology = powerpc_topology;
> +}
> +
>  void __init smp_cpus_done(unsigned int max_cpus)
>  {
>  	cpumask_var_t old_mask;
> @@ -779,15 +805,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
> 
>  	dump_numa_cpu_topology();
> 
> -}
> +	set_sched_topology();
> 
> -int arch_sd_sibling_asym_packing(void)
> -{
> -	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
> -		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
> -		return SD_ASYM_PACKING;
> -	}
> -	return 0;
>  }
> 
>  #ifdef CONFIG_HOTPLUG_CPU
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 3479467..7606de0 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -5818,11 +5818,6 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
>  	atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
>  }
> 
> -int __weak arch_sd_sibling_asym_packing(void)
> -{
> -       return 0*SD_ASYM_PACKING;
> -}
> -
>  /*
>   * Initializers for schedule domains
>   * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
> @@ -6000,7 +5995,6 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
>  	if (sd->flags & SD_SHARE_CPUPOWER) {
>  		sd->imbalance_pct = 110;
>  		sd->smt_gain = 1178; /* ~15% */
> -		sd->flags |= arch_sd_sibling_asym_packing();
> 
>  	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
>  		sd->imbalance_pct = 117;
> 
