Message-ID: <fb27a289-717c-b911-7981-db72cbc51c26@gmail.com>
Date: Mon, 2 Mar 2020 00:39:41 +0300
From: Pavel Begunkov <asml.silence@...il.com>
To: Jens Axboe <axboe@...nel.dk>, io-uring@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 9/9] io_uring: pass submission ref to async
On 01/03/2020 19:18, Pavel Begunkov wrote:
> Currently, every async work handler accepts a submission reference,
> which it should put. Also there is a reference grabbed in io_get_work()
> and dropped in io_put_work(). This patch merges them together.
>
> - So, ownership of the submission reference is passed to io-wq, and it'll
> be put in io_put_work().
> - io_get_work() doesn't take a ref anymore, and so is deleted.
> - async handlers don't put the submission ref anymore.
> - make the cancellation bits of io-wq call the {get,put}_work() handlers
Hmm, it makes them more like {init,fini}_work() and unbalanced/unpaired. Maybe
not a desirable thing.
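
To make the ownership transfer concrete, here is a toy userspace model of
the refcount flow before and after the patch. Purely an illustrative
sketch (plain C, a bare int standing in for refcount_t, all names
hypothetical), not the kernel code:

#include <assert.h>

struct toy_req { int refs; };

/* old scheme: io_get_work() takes an extra ref on queueing, the async
 * handler puts the submission ref, io_put_work() puts io-wq's own ref */
static void old_flow(struct toy_req *r)
{
	r->refs++;	/* io_get_work() */
	r->refs--;	/* handler drops the submission ref */
	r->refs--;	/* io_put_work() drops io-wq's ref */
}

/* new scheme: the submission ref itself is handed over to io-wq and
 * io_put_work() becomes the only put; there is no get side anymore */
static void new_flow(struct toy_req *r)
{
	r->refs--;	/* io_put_work() drops the submission ref */
}

int main(void)
{
	struct toy_req a = { .refs = 2 }, b = { .refs = 2 };

	old_flow(&a);
	new_flow(&b);
	assert(a.refs == 1 && b.refs == 1);	/* completion ref remains */
	return 0;
}

Both variants end with one reference left for completion, which is the
point: the merge doesn't change the net count, only who owns the puts.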
> Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
> ---
> fs/io-wq.c | 17 +++++++++++++----
> fs/io_uring.c | 32 +++++++++++++-------------------
> 2 files changed, 26 insertions(+), 23 deletions(-)
>
> diff --git a/fs/io-wq.c b/fs/io-wq.c
> index f9b18c16ebd8..686ad043c6ac 100644
> --- a/fs/io-wq.c
> +++ b/fs/io-wq.c
> @@ -751,14 +751,23 @@ static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
> return true;
> }
>
> -static void io_run_cancel(struct io_wq_work *work)
> +static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
> {
> + struct io_wq *wq = wqe->wq;
> +
> do {
> struct io_wq_work *old_work = work;
> + bool is_internal = work->flags & IO_WQ_WORK_INTERNAL;
> +
> + if (wq->get_work && !is_internal)
> + wq->get_work(work);
>
> work->flags |= IO_WQ_WORK_CANCEL;
> work->func(&work);
> work = (work == old_work) ? NULL : work;
> +
> + if (wq->put_work && !is_internal)
> + wq->put_work(old_work);
> } while (work);
> }
>
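A side note on the hunk above: since io_uring now registers
get_work == NULL (see the last hunk), only the put side actually fires
here, so for non-internal work the cancel path effectively reduces to
the following (my hand-written reduction, not code from the patch):

	do {
		struct io_wq_work *old_work = work;

		work->flags |= IO_WQ_WORK_CANCEL;
		work->func(&work);	/* may chain a linked work */
		work = (work == old_work) ? NULL : work;
		io_put_work(old_work);	/* drops the submission ref */
	} while (work);

Also note that is_internal is sampled before work->func(&work) runs, so
the put decision doesn't depend on the handler leaving work->flags
intact.
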
> @@ -775,7 +784,7 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
> * It's close enough to not be an issue, fork() has the same delay.
> */
> if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
> - io_run_cancel(work);
> + io_run_cancel(work, wqe);
> return;
> }
>
> @@ -914,7 +923,7 @@ static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
> spin_unlock_irqrestore(&wqe->lock, flags);
>
> if (found) {
> - io_run_cancel(work);
> + io_run_cancel(work, wqe);
> return IO_WQ_CANCEL_OK;
> }
>
> @@ -989,7 +998,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
> spin_unlock_irqrestore(&wqe->lock, flags);
>
> if (found) {
> - io_run_cancel(work);
> + io_run_cancel(work, wqe);
> return IO_WQ_CANCEL_OK;
> }
>
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index d456b0ff6835..c6845a1e5aaa 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -1556,12 +1556,13 @@ static struct io_kiocb *io_put_req_submission(struct io_kiocb *req)
> return nxt;
> }
>
> -static void io_put_req_async_submission(struct io_kiocb *req,
> - struct io_wq_work **workptr)
> +static void io_steal_work(struct io_kiocb *req,
> + struct io_wq_work **workptr)
> {
> - static struct io_kiocb *nxt;
> + struct io_kiocb *nxt = NULL;
>
> - nxt = io_put_req_submission(req);
> + if (!(req->flags & REQ_F_DONT_STEAL_NEXT))
> + io_req_find_next(req, &nxt);
> if (nxt)
> io_wq_assign_next(workptr, nxt);
> }
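
With io_steal_work() in place, the async completion handlers below all
follow the same pattern; the point is that there is no explicit put of
the submission reference in the handler anymore, io-wq's put_work()
covers it. A sketch of the shape (__do_op() is a hypothetical stand-in
for __io_fsync() and friends):

static void io_someop_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);

	if (io_req_cancelled(req))
		return;
	__do_op(req);			/* the actual operation */
	io_steal_work(req, workptr);	/* maybe hand the linked req to io-wq */
}
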
> @@ -2575,7 +2576,7 @@ static bool io_req_cancelled(struct io_kiocb *req)
> if (req->work.flags & IO_WQ_WORK_CANCEL) {
> req_set_fail_links(req);
> io_cqring_add_event(req, -ECANCELED);
> - io_double_put_req(req);
> + io_put_req(req);
> return true;
> }
>
> @@ -2603,7 +2604,7 @@ static void io_fsync_finish(struct io_wq_work **workptr)
> if (io_req_cancelled(req))
> return;
> __io_fsync(req);
> - io_put_req_async_submission(req, workptr);
> + io_steal_work(req, workptr);
> }
>
> static int io_fsync(struct io_kiocb *req, bool force_nonblock)
> @@ -2636,7 +2637,7 @@ static void io_fallocate_finish(struct io_wq_work **workptr)
> if (io_req_cancelled(req))
> return;
> __io_fallocate(req);
> - io_put_req_async_submission(req, workptr);
> + io_steal_work(req, workptr);
> }
>
> static int io_fallocate_prep(struct io_kiocb *req,
> @@ -3003,7 +3004,7 @@ static void io_close_finish(struct io_wq_work **workptr)
>
> /* not cancellable, don't do io_req_cancelled() */
> __io_close_finish(req);
> - io_put_req_async_submission(req, workptr);
> + io_steal_work(req, workptr);
> }
>
> static int io_close(struct io_kiocb *req, bool force_nonblock)
> @@ -3076,7 +3077,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
> if (io_req_cancelled(req))
> return;
> __io_sync_file_range(req);
> - io_put_req_async_submission(req, workptr);
> + io_steal_work(req, workptr);
> }
>
> static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
> @@ -3446,7 +3447,7 @@ static void io_accept_finish(struct io_wq_work **workptr)
> if (io_req_cancelled(req))
> return;
> __io_accept(req, false);
> - io_put_req_async_submission(req, workptr);
> + io_steal_work(req, workptr);
> }
> #endif
>
> @@ -4716,7 +4717,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
> io_put_req(req);
> }
>
> - io_put_req_async_submission(req, workptr);
> + io_steal_work(req, workptr);
> }
>
> static int io_req_needs_file(struct io_kiocb *req, int fd)
> @@ -6107,13 +6108,6 @@ static void io_put_work(struct io_wq_work *work)
> io_put_req(req);
> }
>
> -static void io_get_work(struct io_wq_work *work)
> -{
> - struct io_kiocb *req = container_of(work, struct io_kiocb, work);
> -
> - refcount_inc(&req->refs);
> -}
> -
> static int io_init_wq_offload(struct io_ring_ctx *ctx,
> struct io_uring_params *p)
> {
> @@ -6124,7 +6118,7 @@ static int io_init_wq_offload(struct io_ring_ctx *ctx,
> int ret = 0;
>
> data.user = ctx->user;
> - data.get_work = io_get_work;
> + data.get_work = NULL;
> data.put_work = io_put_work;
>
> if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
>
--
Pavel Begunkov