Message-ID: <x494okguxxp.fsf@segfault.boston.devel.redhat.com>
Date: Tue, 16 Mar 2010 10:46:10 -0400
From: Jeff Moyer <jmoyer@...hat.com>
To: Sergey Temerkhanov <temerkhanov@...ronik.ru>
Cc: LKML <linux-kernel@...r.kernel.org>,
linux-aio <linux-aio@...ck.org>
Subject: Re: [PATCH 1/2] AIO: Fix reference counting in io_cancel and aio_cancel_all
Sergey Temerkhanov <temerkhanov@...ronik.ru> writes:
> io_cancel() and aio_cancel_all() have inconsistent reference counting
> (on the ki_users field of struct kiocb), which leads to unkillable
> processes after the io_cancel() or io_destroy() syscalls. This patch
> fixes the undesired behavior.
Hi, Sergey,
Thanks for the patch.  Would you mind resubmitting it to make it a bit
easier to review?  As posted, it is difficult to tell whether you just
moved the aio_cancel_all function or moved it and changed it along the
way.  Please split the move out into a separate patch, or at least note
in the changelog that the function itself is unchanged.  Next, it would
be great if you could describe what testing you've performed.  Finally,
please CC linux-aio@...ck.org on aio patches.
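
FWIW, a reproducer would also help.  Something along these lines is
what I would try -- just a sketch, since actually exercising the cancel
path needs a file whose driver supports aio and implements ki_cancel,
and the device path below is only a placeholder:

/* Reproducer sketch: on an affected kernel, io_destroy() should hang
 * (and the process become unkillable) after a successful io_cancel().
 * Assumes an aio-capable file whose driver implements ki_cancel; the
 * open() path below is a placeholder, not a real recommendation.
 */
#include <fcntl.h>
#include <linux/aio_abi.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	char buf[512];
	int fd = open("/dev/gadget/ep1out", O_RDONLY);	/* placeholder */

	if (fd < 0 || syscall(__NR_io_setup, 1, &ctx) < 0)
		return 1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_buf = (__u64)(unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);

	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
		return 1;

	/* On buggy kernels this leaks a kiocb reference:
	 * sys_io_cancel() bumps ki_users before calling ki_cancel()
	 * but never drops it afterwards. */
	syscall(__NR_io_cancel, ctx, &cb, &ev);

	/* wait_for_all_aios() never sees the active request count
	 * reach zero, so this never returns. */
	return syscall(__NR_io_destroy, ctx);
}
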
Thanks!
Jeff
>
> Regards, Sergey Temerkhanov, Cifronic ZAO
> diff -r b290b47c0880 fs/aio.c
> --- a/fs/aio.c Tue Mar 16 13:51:13 2010 +0300
> +++ b/fs/aio.c Tue Mar 16 14:11:36 2010 +0300
> @@ -63,6 +63,8 @@
> static void aio_kick_handler(struct work_struct *);
> static void aio_queue_work(struct kioctx *);
>
> +static void aio_cancel_all(struct kioctx *ctx);
> +
> /* aio_setup
> * Creates the slab caches used by the aio routines, panic on
> * failure as this is done early during the boot sequence.
> @@ -315,33 +317,6 @@
> return ctx;
> }
>
> -/* aio_cancel_all
> - * Cancels all outstanding aio requests on an aio context. Used
> - * when the processes owning a context have all exited to encourage
> - * the rapid destruction of the kioctx.
> - */
> -static void aio_cancel_all(struct kioctx *ctx)
> -{
> - int (*cancel)(struct kiocb *, struct io_event *);
> - struct io_event res;
> - spin_lock_irq(&ctx->ctx_lock);
> - ctx->dead = 1;
> - while (!list_empty(&ctx->active_reqs)) {
> - struct list_head *pos = ctx->active_reqs.next;
> - struct kiocb *iocb = list_kiocb(pos);
> - list_del_init(&iocb->ki_list);
> - cancel = iocb->ki_cancel;
> - kiocbSetCancelled(iocb);
> - if (cancel) {
> - iocb->ki_users++;
> - spin_unlock_irq(&ctx->ctx_lock);
> - cancel(iocb, &res);
> - spin_lock_irq(&ctx->ctx_lock);
> - }
> - }
> - spin_unlock_irq(&ctx->ctx_lock);
> -}
> -
> static void wait_for_all_aios(struct kioctx *ctx)
> {
> struct task_struct *tsk = current;
> @@ -1038,6 +1013,36 @@
> return ret;
> }
>
> +/* aio_cancel_all
> + * Cancels all outstanding aio requests on an aio context. Used
> + * when the processes owning a context have all exited to encourage
> + * the rapid destruction of the kioctx.
> + */
> +static void aio_cancel_all(struct kioctx *ctx)
> +{
> + int (*cancel)(struct kiocb *, struct io_event *);
> + struct io_event res;
> + spin_lock_irq(&ctx->ctx_lock);
> + ctx->dead = 1;
> + while (!list_empty(&ctx->active_reqs)) {
> + struct list_head *pos = ctx->active_reqs.next;
> + struct kiocb *iocb = list_kiocb(pos);
> + list_del_init(&iocb->ki_list);
> + cancel = iocb->ki_cancel;
> + kiocbSetCancelled(iocb);
> + if (cancel) {
> + iocb->ki_users++;
> + spin_unlock_irq(&ctx->ctx_lock);
> + cancel(iocb, &res);
> + spin_lock_irq(&ctx->ctx_lock);
> + dprintk("aio_cancel_all: iocb->ki_users: %d\n", iocb->ki_users);
> + __aio_put_req(ctx, iocb);
> + }
> + __aio_put_req(ctx, iocb);
> + }
> + spin_unlock_irq(&ctx->ctx_lock);
> +}
> +
> /* aio_read_evt
> * Pull an event off of the ioctx's event ring. Returns the number of
> * events fetched (0 or 1 ;-)
> @@ -1787,9 +1792,11 @@
> if (copy_to_user(result, &tmp, sizeof(tmp)))
> ret = -EFAULT;
> }
> + aio_put_req(kiocb);
> } else
> ret = -EINVAL;
>
> + aio_put_req(kiocb);
> put_ioctx(ctx);
>
> return ret;