It's no longer deemed proper to have full cpumasks embedded in data structures; they have to be allocated dynamically instead, sized to the runtime-determined dimensions of the machine in question.

Signed-off-by: Peter Zijlstra
---
 kernel/smp.c |   54 +++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 45 insertions(+), 9 deletions(-)

Index: linux-2.6/kernel/smp.c
===================================================================
--- linux-2.6.orig/kernel/smp.c
+++ linux-2.6/kernel/smp.c
@@ -10,6 +10,7 @@
 #include <linux/rcupdate.h>
 #include <linux/rculist.h>
 #include <linux/smp.h>
+#include <linux/cpu.h>
 
 static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
 
@@ -40,7 +41,7 @@ struct call_function_data {
 	spinlock_t lock;
 	unsigned int refs;
 	struct list_head free_list;
-	struct cpumask cpumask;
+	cpumask_var_t cpumask;
 };
 
 struct call_single_queue {
@@ -48,8 +49,41 @@ struct call_single_queue {
 	spinlock_t lock;
 };
 
+static DEFINE_PER_CPU(struct call_function_data, cfd_data);
+
+static int
+hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+	long cpu = (long)hcpu;
+	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+				cpu_to_node(cpu)))
+			return NOTIFY_BAD;
+		break;
+
+	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
+
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		free_cpumask_var(cfd->cpumask);
+		break;
+	};
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
+	.notifier_call = hotplug_cfd,
+};
+
 static int __cpuinit init_call_single_data(void)
 {
+	void *cpu = (void *)(long)smp_processor_id();
 	int i;
 
 	for_each_possible_cpu(i) {
@@ -58,6 +92,10 @@ static int __cpuinit init_call_single_da
 		spin_lock_init(&q->lock);
 		INIT_LIST_HEAD(&q->list);
 	}
+
+	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
+	register_cpu_notifier(&hotplug_cfd_notifier);
+
 	return 0;
 }
 early_initcall(init_call_single_data);
@@ -116,13 +154,13 @@ void generic_smp_call_function_interrupt
 		LIST_HEAD(free_list);
 		int refs;
 
-		if (!cpumask_test_cpu(cpu, &data->cpumask))
+		if (!cpumask_test_cpu(cpu, data->cpumask))
 			continue;
 
 		data->csd.func(data->csd.info);
 
 		spin_lock(&data->lock);
-		cpumask_clear_cpu(cpu, &data->cpumask);
+		cpumask_clear_cpu(cpu, data->cpumask);
 		WARN_ON(data->refs == 0);
 		data->refs--;
 		refs = data->refs;
@@ -313,8 +351,6 @@ void __smp_call_function_single(int cpu,
 			arch_send_call_function_ipi(*(maskp))
 #endif
 
-static DEFINE_PER_CPU(struct call_function_data, cfd_data);
-
 /**
  * smp_call_function_many(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on (only runs on online subset).
@@ -373,9 +409,9 @@ void smp_call_function_many(const struct
 		data->csd.flags |= CSD_FLAG_WAIT;
 	data->csd.func = func;
 	data->csd.info = info;
-	cpumask_and(&data->cpumask, mask, cpu_online_mask);
-	cpumask_clear_cpu(smp_processor_id(), &data->cpumask);
-	data->refs = cpumask_weight(&data->cpumask);
+	cpumask_and(data->cpumask, mask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), data->cpumask);
+	data->refs = cpumask_weight(data->cpumask);
 
 	spin_lock_irqsave(&call_function.lock, flags);
 	call_function.counter++;
@@ -388,7 +424,7 @@ void smp_call_function_many(const struct
 	smp_mb();
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(&data->cpumask);
+	arch_send_call_function_ipi_mask(data->cpumask);
 
 	/* optionally wait for the CPUs to complete */
 	if (wait)
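
Note for readers unfamiliar with the conversion: below is a minimal, hypothetical sketch (not part of the patch) of the cpumask_var_t idiom that kernel/smp.c is being converted to. With CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a struct cpumask * and alloc_cpumask_var() performs a real allocation sized to the runtime nr_cpu_ids; without it, the type degenerates to a one-element array, the "allocation" trivially succeeds, and free_cpumask_var() is a no-op. The example_mask name and the init/exit functions are illustrative only.

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	static cpumask_var_t example_mask;	/* hypothetical example */

	static int example_init(void)
	{
		/*
		 * Allocates for real only with CONFIG_CPUMASK_OFFSTACK=y;
		 * otherwise this always succeeds without allocating, which
		 * is why every user must still pair alloc with free.
		 */
		if (!alloc_cpumask_var(&example_mask, GFP_KERNEL))
			return -ENOMEM;

		/* Note: no '&' -- a cpumask_var_t already acts as a pointer. */
		cpumask_copy(example_mask, cpu_online_mask);

		return 0;
	}

	static void example_exit(void)
	{
		free_cpumask_var(example_mask);	/* no-op if !CPUMASK_OFFSTACK */
	}

The patch itself uses the NUMA-aware variant, alloc_cpumask_var_node(..., cpu_to_node(cpu)), from CPU_UP_PREPARE, so each per-cpu mask is placed on the owning CPU's node and a failed allocation can veto the CPU coming online via NOTIFY_BAD.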