Message-ID: <20080229134342.GB10565@Krystal>
Date: Fri, 29 Feb 2008 08:43:42 -0500
From: Mathieu Desnoyers <mathieu.desnoyers@...ymtl.ca>
To: Jason Baron <jbaron@...hat.com>
Cc: akpm@...ux-foundation.org, Ingo Molnar <mingo@...e.hu>,
linux-kernel@...r.kernel.org, Rusty Russell <rusty@...tcorp.com.au>
Subject: Re: [patch 2/2] implement immediate updating via stop_machine_run()
* Jason Baron (jbaron@...hat.com) wrote:
>
> Updating immediate values cannot rely on smp_call_function(), because
> synchronizing cpus with IPIs can deadlock. Process A holds a read lock on
> tasklist_lock when process B calls apply_imv_update(). Process A receives the
> IPI and starts spinning in ipi_busy_loop() with interrupts disabled. Process C
> then takes the tasklist lock with write_lock_irq() before receiving the IPI.
> Now A, stuck in the busy loop waiting for every cpu to check in, blocks C's
> write lock, and C can never service the IPI because its interrupts are
> disabled, so nothing makes progress. Solve this by using a new 'ALL_CPUS'
> parameter to stop_machine_run(), which runs a function on all cpus once they
> are busy looping with irqs disabled. Since this runs from a fresh process
> context, we don't have to worry about interrupted spin_locks. It is also
> fewer lines of code. Has survived 24+ hours of testing...
>
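
For anyone who has not followed the ALL_CPUS thread, here is a minimal
sketch (illustration only, not part of the patch) of the scheme the
changelog describes. The struct and function names below are made up, and
the atomic_cmpxchg() is only there to make the single-writer election
explicit; stop_machine_run() with ALL_CPUS (patch 1/2), text_poke(),
sync_core() and flush_icache_range() are the same helpers the patch uses:

#include <linux/stop_machine.h>
#include <linux/types.h>
#include <asm/atomic.h>
#include <asm/alternative.h>	/* text_poke() (x86) */
#include <asm/processor.h>	/* sync_core() */
#include <asm/cacheflush.h>	/* flush_icache_range() */

/* Hypothetical helper type, for illustration only. */
struct patch_request {
	unsigned long addr;	/* address of the instruction to rewrite */
	const void *newval;	/* replacement bytes */
	size_t len;
};

static atomic_t patch_writer = ATOMIC_INIT(0);

static int __smp_patch_text(void *data)
{
	struct patch_request *req = data;

	/*
	 * Every online cpu runs this with irqs off while all the others
	 * are parked, so nobody is executing, or spinning on a lock
	 * around, the code being rewritten.
	 */
	if (atomic_cmpxchg(&patch_writer, 0, 1) == 0) {
		/* Exactly one cpu rewrites the instruction bytes. */
		text_poke((void *)req->addr, (void *)req->newval, req->len);
		smp_wmb();	/* make the new bytes visible to all cpus */
	} else {
		/*
		 * The other cpus serialize before resuming execution,
		 * as required by Intel's cross-modifying code rules.
		 */
		sync_core();
	}
	flush_icache_range(req->addr, req->addr + req->len);
	return 0;
}

static int smp_patch_text(struct patch_request *req)
{
	/* The real patch also takes kernel_text_lock() around this. */
	atomic_set(&patch_writer, 0);
	return stop_machine_run(__smp_patch_text, req, ALL_CPUS);
}

The point being that stop_machine_run() parks every cpu in a fresh kernel
thread with irqs disabled before the text is touched, so the tasklist_lock
scenario above cannot arise.
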
Ok, I am merging it in my patchset. Note that I have additional patches
in my LTTng tree that implement the lockless algorithm on top of these
and do not need stop_machine for the immediate values. Therefore,
I won't be doing much testing of these two patches myself.
Thanks for taking care of this,
Mathieu
> Signed-off-by: Jason Baron <jbaron@...hat.com>
>
> ---
>
> kernel/immediate.c | 80 ++++++++++++++--------------------------------------
> 1 files changed, 21 insertions(+), 59 deletions(-)
>
>
> diff --git a/kernel/immediate.c b/kernel/immediate.c
> index 4c36a89..378a452 100644
> --- a/kernel/immediate.c
> +++ b/kernel/immediate.c
> @@ -20,6 +20,7 @@
> #include <linux/immediate.h>
> #include <linux/memory.h>
> #include <linux/cpu.h>
> +#include <linux/stop_machine.h>
>
> #include <asm/cacheflush.h>
>
> @@ -27,48 +28,33 @@
> * Kernel ready to execute the SMP update that may depend on trap and ipi.
> */
> static int imv_early_boot_complete;
> +static int wrote_text;
>
> extern const struct __imv __start___imv[];
> extern const struct __imv __stop___imv[];
>
> +static int stop_machine_imv_update(void *imv_ptr)
> +{
> + struct __imv *imv = imv_ptr;
> +
> + if (!wrote_text) {
> + text_poke((void *)imv->imv, (void *)imv->var, imv->size);
> + wrote_text = 1;
> + smp_wmb(); /* make sure other cpus see that this has run */
> + } else
> + sync_core();
> +
> + flush_icache_range(imv->imv, imv->imv + imv->size);
> +
> + return 0;
> +}
> +
> /*
> * imv_mutex nests inside module_mutex. imv_mutex protects builtin
> * immediates and module immediates.
> */
> static DEFINE_MUTEX(imv_mutex);
>
> -static atomic_t wait_sync;
> -
> -struct ipi_loop_data {
> - long value;
> - const struct __imv *imv;
> -} loop_data;
> -
> -static void ipi_busy_loop(void *arg)
> -{
> - unsigned long flags;
> -
> - local_irq_save(flags);
> - atomic_dec(&wait_sync);
> - do {
> - /* Make sure the wait_sync gets re-read */
> - smp_mb();
> - } while (atomic_read(&wait_sync) > loop_data.value);
> - atomic_dec(&wait_sync);
> - do {
> - /* Make sure the wait_sync gets re-read */
> - smp_mb();
> - } while (atomic_read(&wait_sync) > 0);
> - /*
> - * Issuing a synchronizing instruction must be done on each CPU before
> - * reenabling interrupts after modifying an instruction. Required by
> - * Intel's errata.
> - */
> - sync_core();
> - flush_icache_range(loop_data.imv->imv,
> - loop_data.imv->imv + loop_data.imv->size);
> - local_irq_restore(flags);
> -}
>
> /**
> * apply_imv_update - update one immediate value
> @@ -82,9 +68,6 @@ static void ipi_busy_loop(void *arg)
> */
> static int apply_imv_update(const struct __imv *imv)
> {
> - unsigned long flags;
> - long online_cpus;
> -
> /*
> * If the variable and the instruction have the same value, there is
> * nothing to do.
> @@ -111,30 +94,9 @@ static int apply_imv_update(const struct __imv *imv)
>
> if (imv_early_boot_complete) {
> kernel_text_lock();
> - get_online_cpus();
> - online_cpus = num_online_cpus();
> - atomic_set(&wait_sync, 2 * online_cpus);
> - loop_data.value = online_cpus;
> - loop_data.imv = imv;
> - smp_call_function(ipi_busy_loop, NULL, 1, 0);
> - local_irq_save(flags);
> - atomic_dec(&wait_sync);
> - do {
> - /* Make sure the wait_sync gets re-read */
> - smp_mb();
> - } while (atomic_read(&wait_sync) > online_cpus);
> - text_poke((void *)imv->imv, (void *)imv->var,
> - imv->size);
> - /*
> - * Make sure the modified instruction is seen by all CPUs before
> - * we continue (visible to other CPUs and local interrupts).
> - */
> - wmb();
> - atomic_dec(&wait_sync);
> - flush_icache_range(imv->imv,
> - imv->imv + imv->size);
> - local_irq_restore(flags);
> - put_online_cpus();
> + wrote_text = 0;
> + stop_machine_run(stop_machine_imv_update, (void *)imv,
> + ALL_CPUS);
> kernel_text_unlock();
> } else
> text_poke_early((void *)imv->imv, (void *)imv->var,
--
Mathieu Desnoyers
Computer Engineering Ph.D. Student, Ecole Polytechnique de Montreal
OpenPGP key fingerprint: 8CD5 52C3 8E3C 4140 715F BA06 3F25 A8FE 3BAE 9A68