[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <3db3cb928e4bc7670a0e7e105b2b417897c4b96e.1582932860.git.asml.silence@gmail.com>
Date: Sat, 29 Feb 2020 02:37:26 +0300
From: Pavel Begunkov <asml.silence@...il.com>
To: Jens Axboe <axboe@...nel.dk>, io-uring@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v2 2/5] io_uring/io-wq: pass *work instead of **workptr
Now work->func() never modifies the passed workptr.
Remove the extra level of indirection by passing a struct io_wq_work *
directly instead of a pointer to that pointer.
Also, this deliberately leaves the (work != old_work) dance in
io_worker_handle_work() in place, as it will be reused shortly.
Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
---
fs/io-wq.c | 11 +++++------
fs/io-wq.h | 2 +-
fs/io_uring.c | 25 ++++++++++++-------------
3 files changed, 18 insertions(+), 20 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index a05c32df2046..a830eddaffbe 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -503,7 +503,7 @@ static void io_worker_handle_work(struct io_worker *worker)
}
old_work = work;
- work->func(&work);
+ work->func(work);
spin_lock_irq(&worker->lock);
worker->cur_work = NULL;
@@ -756,7 +756,7 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
*/
if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ work->func(work);
return;
}
@@ -896,7 +896,7 @@ static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
if (found) {
work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ work->func(work);
return IO_WQ_CANCEL_OK;
}
@@ -972,7 +972,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
if (found) {
work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ work->func(work);
return IO_WQ_CANCEL_OK;
}
@@ -1049,9 +1049,8 @@ struct io_wq_flush_data {
struct completion done;
};
-static void io_wq_flush_func(struct io_wq_work **workptr)
+static void io_wq_flush_func(struct io_wq_work *work)
{
- struct io_wq_work *work = *workptr;
struct io_wq_flush_data *data;
data = container_of(work, struct io_wq_flush_data, work);
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 001194aef6ae..508615af4552 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -68,7 +68,7 @@ struct io_wq_work {
struct io_wq_work_node list;
void *data;
};
- void (*func)(struct io_wq_work **);
+ void (*func)(struct io_wq_work *);
struct files_struct *files;
struct mm_struct *mm;
const struct cred *creds;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ee75e503964d..54dfa9b71864 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -793,7 +793,7 @@ static const struct io_op_def io_op_defs[] = {
}
};
-static void io_wq_submit_work(struct io_wq_work **workptr);
+static void io_wq_submit_work(struct io_wq_work *work);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
@@ -2568,9 +2568,9 @@ static void __io_fsync(struct io_kiocb *req)
io_put_req(req);
}
-static void io_fsync_finish(struct io_wq_work **workptr)
+static void io_fsync_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
if (io_req_cancelled(req))
return;
@@ -2604,9 +2604,9 @@ static void __io_fallocate(struct io_kiocb *req)
io_put_req(req);
}
-static void io_fallocate_finish(struct io_wq_work **workptr)
+static void io_fallocate_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
__io_fallocate(req);
}
@@ -2970,9 +2970,9 @@ static void __io_close_finish(struct io_kiocb *req)
io_put_req(req);
}
-static void io_close_finish(struct io_wq_work **workptr)
+static void io_close_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
/* not cancellable, don't do io_req_cancelled() */
__io_close_finish(req);
@@ -3039,9 +3039,9 @@ static void __io_sync_file_range(struct io_kiocb *req)
}
-static void io_sync_file_range_finish(struct io_wq_work **workptr)
+static void io_sync_file_range_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
if (io_req_cancelled(req))
return;
@@ -3409,9 +3409,9 @@ static int __io_accept(struct io_kiocb *req, bool force_nonblock)
return 0;
}
-static void io_accept_finish(struct io_wq_work **workptr)
+static void io_accept_finish(struct io_wq_work *work)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
io_put_req(req);
@@ -4646,9 +4646,8 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return 0;
}
-static void io_wq_submit_work(struct io_wq_work **workptr)
+static void io_wq_submit_work(struct io_wq_work *work)
{
- struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
int ret = 0;
--
2.24.0
Powered by blists - more mailing lists