Message-ID: <CAE9FiQUeWZrbCJqH=ObVbX9377PyEU+8NZrQKNg8v+a7XJENpg@mail.gmail.com>
Date: Thu, 10 May 2012 10:15:15 -0700
From: Yinghai Lu <yinghai@...nel.org>
To: Peter Zijlstra <peterz@...radead.org>
Cc: mingo@...nel.org, hpa@...or.com, linux-kernel@...r.kernel.org,
tj@...nel.org, tglx@...utronix.de,
linux-tip-commits@...r.kernel.org
Subject: Re: [tip:sched/core] x86/numa: Check for nonsensical topologies on
real hw as well
On Thu, May 10, 2012 at 6:54 AM, Peter Zijlstra <peterz@...radead.org> wrote:
>
> OK here's one that compiles and boots without silly warnings on a WSM-EP
>
>
> ---
> arch/x86/kernel/smpboot.c | 112 ++++++++++++++++++++++++++-------------------
> 1 file changed, 66 insertions(+), 46 deletions(-)
>
> diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
> index e543e02..e8bf636 100644
> --- a/arch/x86/kernel/smpboot.c
> +++ b/arch/x86/kernel/smpboot.c
> @@ -299,70 +299,90 @@ void __cpuinit smp_store_cpu_info(int id)
> identify_secondary_cpu(c);
> }
>
> -static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
> +static bool __cpuinit
> +topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
> {
> - cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
> - cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1));
> - cpumask_set_cpu(cpu1, cpu_core_mask(cpu2));
> - cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
> - cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
> - cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
> + int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
> +
> + return !WARN(cpu_to_node(cpu1) != cpu_to_node(cpu2),
> + "sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
> + "[node: %d != %d]. Ignoring dependency.\n",
> + cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
> }
>
> +#define link_mask(_m, c1, c2) \
> +do { \
> + cpumask_set_cpu((c1), cpu_##_m##_mask(c2)); \
> + cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \
> +} while (0)
>
> -void __cpuinit set_cpu_sibling_map(int cpu)
> +static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
> {
> - int i;
> - struct cpuinfo_x86 *c = &cpu_data(cpu);
> + if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
> + int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
>
> - cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
> + if (c->phys_proc_id == o->phys_proc_id &&
> + per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
> + c->compute_unit_id == o->compute_unit_id)
> + return topology_sane(c, o, "smt");
>
> - if (smp_num_siblings > 1) {
> - for_each_cpu(i, cpu_sibling_setup_mask) {
> - struct cpuinfo_x86 *o = &cpu_data(i);
> + } else if (c->phys_proc_id == o->phys_proc_id &&
> + c->cpu_core_id == o->cpu_core_id) {
> + return topology_sane(c, o, "smt");
> + }
>
> - if (cpu_to_node(cpu) != cpu_to_node(i)) {
> - WARN_ONCE(1, "sched: CPU #%d's thread-sibling CPU #%d not on the same node! [node %d != %d]. Ignoring sibling dependency.\n", cpu, i, cpu_to_node(cpu), cpu_to_node(i));
> - continue;
> - }
> + return false;
> +}
>
> - if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
> - if (c->phys_proc_id == o->phys_proc_id &&
> - per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i) &&
> - c->compute_unit_id == o->compute_unit_id)
> - link_thread_siblings(cpu, i);
> - } else if (c->phys_proc_id == o->phys_proc_id &&
> - c->cpu_core_id == o->cpu_core_id) {
> - link_thread_siblings(cpu, i);
> - }
> - }
> - } else {
> - cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
> - }
> +static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
> +{
> + int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
> +
> + if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID &&
> + per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2))
> + return topology_sane(c, o, "llc");
> +
> + return false;
> +}
> +
> +static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
> +{
> + if (c->phys_proc_id == o->phys_proc_id)
> + return topology_sane(c, o, "mc");
> +
> + return false;
> +}
> +
> +void __cpuinit set_cpu_sibling_map(int cpu)
> +{
> + bool has_mc = boot_cpu_data.x86_max_cores > 1;
> + bool has_smt = smp_num_siblings > 1;
> + struct cpuinfo_x86 *c = &cpu_data(cpu);
> + struct cpuinfo_x86 *o;
> + int i;
>
> - cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
> + cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
>
> - if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
> - cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
> + if (!has_smt && !has_mc) {
> + cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
> + cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
> + cpumask_set_cpu(cpu, cpu_core_mask(cpu));
> c->booted_cores = 1;
> return;
> }
>
> for_each_cpu(i, cpu_sibling_setup_mask) {
> - if (cpu_to_node(cpu) != cpu_to_node(i)) {
> - WARN_ONCE(1, "sched: CPU #%d's core-sibling CPU #%d not on the same node! [node %d != %d]. Ignoring sibling dependency.\n", cpu, i, cpu_to_node(cpu), cpu_to_node(i));
> - continue;
> - }
> + o = &cpu_data(i);
>
> - if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
> - per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
> - cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
> - cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
> - }
> + if ((i == cpu) || (has_smt && match_smt(c, o)))
> + link_mask(sibling, cpu, i);
> +
> + if ((i == cpu) || (has_mc && match_llc(c, o)))
> + link_mask(llc_shared, cpu, i);
> +
> + if ((i == cpu) || (has_mc && match_mc(c, o))) {
> + link_mask(core, cpu, i);
>
> - if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
> - cpumask_set_cpu(i, cpu_core_mask(cpu));
> - cpumask_set_cpu(cpu, cpu_core_mask(i));
> /*
> * Does this new cpu bringup a new core?
> */
>
Yes, this makes the wrong warning go away.
Thanks
Yinghai Lu
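
[Editor's sketch] For reference, here is a minimal user-space sketch of the pattern the
patch introduces: a symmetric link_mask()-style helper plus a topology_sane()-style
check that warns and skips the link when two supposed siblings end up on different
NUMA nodes. It is an illustrative standalone analogue, not the kernel code; the data
layout, the made-up node/package numbers, and the deliberately misplaced CPU are
assumptions for demonstration only.

    /*
     * Illustrative user-space analogue (not kernel code) of the patch's
     * structure: match_mc() decides whether two CPUs are core siblings,
     * topology_sane() refuses pairings that cross NUMA nodes, and
     * link_core() mirrors link_mask() by setting both directions.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 8

    struct cpu {
        int node;           /* NUMA node the CPU sits on (example data) */
        int phys_proc_id;   /* physical package id (example data)       */
        uint64_t core_mask; /* bitmask of core siblings                 */
    };

    static struct cpu cpus[NR_CPUS];

    /* Mirror of link_mask(): record each CPU in the other's mask. */
    static void link_core(int c1, int c2)
    {
        cpus[c1].core_mask |= 1ULL << c2;
        cpus[c2].core_mask |= 1ULL << c1;
    }

    /* Mirror of topology_sane(): warn and bail out when the nodes differ. */
    static bool topology_sane(int c1, int c2, const char *name)
    {
        if (cpus[c1].node != cpus[c2].node) {
            fprintf(stderr,
                "CPU #%d's %s-sibling CPU #%d is not on the same node! "
                "[node: %d != %d]. Ignoring dependency.\n",
                c1, name, c2, cpus[c1].node, cpus[c2].node);
            return false;
        }
        return true;
    }

    /* Mirror of match_mc(): same package and sane topology => core siblings. */
    static bool match_mc(int c1, int c2)
    {
        if (cpus[c1].phys_proc_id == cpus[c2].phys_proc_id)
            return topology_sane(c1, c2, "mc");
        return false;
    }

    int main(void)
    {
        /* Two 4-core packages; pretend firmware misplaced CPU 3's node. */
        for (int i = 0; i < NR_CPUS; i++) {
            cpus[i].phys_proc_id = i / 4;
            cpus[i].node = i / 4;
        }
        cpus[3].node = 1;

        /* Analogue of the loop in set_cpu_sibling_map(). */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            for (int i = 0; i <= cpu; i++)
                if (i == cpu || match_mc(cpu, i))
                    link_core(cpu, i);

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            printf("cpu%d core_mask=0x%02llx\n", cpu,
                   (unsigned long long)cpus[cpu].core_mask);
        return 0;
    }

As in the patch, the check returns false rather than aborting: a nonsensical pairing
is reported once but never linked, so the resulting masks stay self-consistent.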