Message-ID: <20110608192049.GB12457@elte.hu>
Date: Wed, 8 Jun 2011 21:20:49 +0200
From: Ingo Molnar <mingo@...e.hu>
To: Suresh Siddha <suresh.b.siddha@...el.com>,
Rusty Russell <rusty@...tcorp.com.au>,
Tejun Heo <tj@...nel.org>
Cc: tglx@...utronix.de, hpa@...or.com, trenn@...ell.com,
prarit@...hat.com, tj@...nel.org, linux-kernel@...r.kernel.org,
youquan.song@...el.com, stable@...nel.org
Subject: Re: [patch v2 1/2] stop_machine: enable __stop_machine() to be
called from the cpu online path
Rusty, Tejun, what do you think about the patch below?
Thanks,
Ingo
* Suresh Siddha <suresh.b.siddha@...el.com> wrote:
> Currently the stop_machine infrastructure can be called only from a cpu
> that is online. For !CONFIG_SMP, however, we do allow __stop_machine() to
> be called before the cpu is online.
>
> x86, for example, needs a stop_machine-style rendezvous in the cpu online
> path and currently implements its own (using stop_one_cpu_nowait()) for
> MTRR initialization there.
>
> Enhance __stop_machine() so that it can be called before the cpu is
> onlined. This paves the way for code consolidation and addresses potential
> deadlocks caused by having multiple mechanisms for doing a system-wide
> rendezvous.
>
> This will also address the behavioral differences of __stop_machine()
> between SMP and UP builds.
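
As an illustration only (not part of this patch), here is a minimal sketch
of what a caller in an architecture's cpu-online path could look like once
the common rendezvous works there. The sync_args, sync_handler and
sync_from_online_path names are hypothetical; __stop_machine() is the
existing interface being extended:

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/stop_machine.h>

/* Hypothetical payload for the rendezvous, purely for illustration. */
struct sync_args {
        int reg;
};

static int sync_handler(void *data)
{
        struct sync_args *args = data;

        /*
         * Runs on the onlining cpu while every online cpu is parked with
         * interrupts disabled, so the update done here cannot be observed
         * half-finished.
         */
        (void)args;
        return 0;
}

/* Called from the cpu-online path, before this cpu is marked online. */
static void sync_from_online_path(void)
{
        struct sync_args args = { .reg = 0 };

        /*
         * Since this cpu's stopper is not enabled yet, the patched
         * __stop_machine() falls back to __stop_cpus(NULL, ...) and runs
         * the stop-machine state machine directly in this context, with
         * only this cpu in the active mask.
         */
        __stop_machine(sync_handler, &args, cpumask_of(smp_processor_id()));
}

Something along these lines is what would let x86 drop its private
stop_one_cpu_nowait()-based rendezvous for MTRR initialization.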
>
> Signed-off-by: Suresh Siddha <suresh.b.siddha@...el.com>
> Cc: stable@...nel.org # v2.6.35+
> ---
> kernel/stop_machine.c | 62 +++++++++++++++++++++++++++++++++++++++++++++-----
> 1 file changed, 57 insertions(+), 5 deletions(-)
>
> Index: linux-2.6-tip/kernel/stop_machine.c
> ===================================================================
> --- linux-2.6-tip.orig/kernel/stop_machine.c
> +++ linux-2.6-tip/kernel/stop_machine.c
> @@ -136,12 +136,35 @@ void stop_one_cpu_nowait(unsigned int cp
> static DEFINE_MUTEX(stop_cpus_mutex);
> static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
>
> +/**
> + * __stop_cpus - stop multiple cpus
> + * @cpumask: cpus to stop
> + * @fn: function to execute
> + * @arg: argument to @fn
> + *
> + * Execute @fn(@arg) on the online cpus in @cpumask. If @cpumask is NULL,
> + * @fn is run on all online cpus and also on the current cpu, even if the
> + * current cpu is not yet online.
> + * On each target cpu, @fn runs in process context with the highest
> + * priority, preempting any task on the cpu and monopolizing it (except on
> + * a cpu that is in the process of coming online, where @fn runs in the
> + * same context as the __stop_cpus() caller). This function returns after
> + * all executions are complete.
> + */
> int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
> {
> + int online = percpu_read(cpu_stopper.enabled);
> + int include_this_offline = 0;
> struct cpu_stop_work *work;
> struct cpu_stop_done done;
> + unsigned int weight;
> unsigned int cpu;
>
> + if (!cpumask) {
> + cpumask = cpu_online_mask;
> + include_this_offline = 1;
> + }
> +
> /* initialize works and done */
> for_each_cpu(cpu, cpumask) {
> work = &per_cpu(stop_cpus_work, cpu);
> @@ -149,7 +172,12 @@ int __stop_cpus(const struct cpumask *cp
> work->arg = arg;
> work->done = &done;
> }
> - cpu_stop_init_done(&done, cpumask_weight(cpumask));
> +
> + weight = cpumask_weight(cpumask);
> + if (!online && include_this_offline)
> + weight++;
> +
> + cpu_stop_init_done(&done, weight);
>
> /*
> * Disable preemption while queueing to avoid getting
> @@ -162,7 +190,20 @@ int __stop_cpus(const struct cpumask *cp
> &per_cpu(stop_cpus_work, cpu));
> preempt_enable();
>
> - wait_for_completion(&done.completion);
> + if (online)
> + wait_for_completion(&done.completion);
> + else {
> + /*
> + * This cpu is not yet online. If @fn needs to be run on this
> + * cpu, run it now. Also, we can't afford to sleep here,
> + * so poll till the work is completed on all the cpu's.
> + */
> + if (include_this_offline)
> + fn(arg);
> + while (atomic_read(&done.nr_todo) > 1)
> + cpu_relax();
> + }
> +
> return done.executed ? done.ret : -ENOENT;
> }
>
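
Since the calling cpu is not yet online, its stopper thread is not running
and the caller cannot sleep in wait_for_completion(); it therefore runs
@fn itself and busy-waits until only its own (never signalled) slot in
nr_todo remains, hence the "> 1" in the polling loop. A stand-alone
user-space analogue of that counting and polling pattern, purely for
illustration (the thread count and names are arbitrary):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_ONLINE_CPUS 4

/* nr_todo counts the online "cpus" plus the offline caller itself. */
static atomic_int nr_todo = NR_ONLINE_CPUS + 1;

static void fn(const char *who)
{
        printf("%s: running the rendezvous function\n", who);
}

/* Stand-in for a per-cpu stopper thread: run fn, then signal completion. */
static void *stopper(void *arg)
{
        (void)arg;
        fn("stopper");
        atomic_fetch_sub(&nr_todo, 1);  /* like cpu_stop_signal_done() */
        return NULL;
}

int main(void)
{
        pthread_t tids[NR_ONLINE_CPUS];
        int i;

        for (i = 0; i < NR_ONLINE_CPUS; i++)
                pthread_create(&tids[i], NULL, stopper, NULL);

        /*
         * The "offline" caller cannot sleep on a completion: it runs fn
         * itself, and its own slot in nr_todo is never decremented, so it
         * spins until only that slot is left, i.e. until nr_todo == 1.
         */
        fn("offline caller");
        while (atomic_load(&nr_todo) > 1)
                ;       /* cpu_relax() equivalent: plain busy-wait */

        for (i = 0; i < NR_ONLINE_CPUS; i++)
                pthread_join(tids[i], NULL);
        puts("rendezvous complete on all participants");
        return 0;
}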
> @@ -431,6 +472,7 @@ static int stop_machine_cpu_stop(void *d
> struct stop_machine_data *smdata = data;
> enum stopmachine_state curstate = STOPMACHINE_NONE;
> int cpu = smp_processor_id(), err = 0;
> + unsigned long flags = 0;
> bool is_active;
>
> if (!smdata->active_cpus)
> @@ -446,7 +488,7 @@ static int stop_machine_cpu_stop(void *d
> curstate = smdata->state;
> switch (curstate) {
> case STOPMACHINE_DISABLE_IRQ:
> - local_irq_disable();
> + local_irq_save(flags);
> hard_irq_disable();
> break;
> case STOPMACHINE_RUN:
> @@ -460,7 +502,7 @@ static int stop_machine_cpu_stop(void *d
> }
> } while (curstate != STOPMACHINE_EXIT);
>
> - local_irq_enable();
> + local_irq_restore(flags);
> return err;
> }
>
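
The local_irq_disable()/local_irq_enable() pair becomes
local_irq_save()/local_irq_restore() because, with this patch,
stop_machine_cpu_stop() can also run in the context of a cpu that is
coming online and may already have interrupts disabled; unconditionally
enabling interrupts on the way out would clobber the caller's state, while
save/restore preserves it. A small stand-alone user-space analogue of the
save/restore idea, using signal masks in place of the interrupt flag
(illustration only):

#include <signal.h>
#include <stdio.h>

/*
 * Block "everything" for a critical section, then restore whatever the
 * caller had, the analogue of local_irq_save()/local_irq_restore().
 */
static void critical_section(void)
{
        sigset_t all, saved;

        sigfillset(&all);
        sigprocmask(SIG_BLOCK, &all, &saved);   /* "local_irq_save(flags)" */

        puts("critical section: all signals blocked");

        sigprocmask(SIG_SETMASK, &saved, NULL); /* "local_irq_restore(flags)" */
}

int main(void)
{
        sigset_t some, now;

        /*
         * The caller already has SIGUSR1 blocked, like a cpu coming online
         * that already runs with interrupts disabled.
         */
        sigemptyset(&some);
        sigaddset(&some, SIGUSR1);
        sigprocmask(SIG_BLOCK, &some, NULL);

        critical_section();

        /*
         * SIGUSR1 is still blocked: the caller's state was preserved.  An
         * unconditional "unblock everything" would have lost it, just as
         * local_irq_enable() would wrongly enable interrupts here.
         */
        sigprocmask(SIG_SETMASK, NULL, &now);
        printf("SIGUSR1 still blocked: %s\n",
               sigismember(&now, SIGUSR1) ? "yes" : "no");
        return 0;
}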
> @@ -470,9 +512,19 @@ int __stop_machine(int (*fn)(void *), vo
> .num_threads = num_online_cpus(),
> .active_cpus = cpus };
>
> + /* Include the calling cpu that might not be online yet. */
> + if (!percpu_read(cpu_stopper.enabled))
> + smdata.num_threads++;
> +
> /* Set the initial state and stop all online cpus. */
> set_state(&smdata, STOPMACHINE_PREPARE);
> - return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
> +
> + if (percpu_read(cpu_stopper.enabled))
> + return stop_cpus(cpu_online_mask, stop_machine_cpu_stop,
> + &smdata);
> + else
> + return __stop_cpus(NULL, stop_machine_cpu_stop,
> + &smdata);
> }
>
> int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
>
--
Thanks,
Ingo