Message-Id: <20191017212858.13230-2-axboe@kernel.dk>
Date: Thu, 17 Oct 2019 15:28:56 -0600
From: Jens Axboe <axboe@kernel.dk>
To: linux-block@vger.kernel.org
Cc: davem@davemloft.net, netdev@vger.kernel.org,
	Jens Axboe <axboe@kernel.dk>
Subject: [PATCH 1/3] io_uring: add support for async work inheriting files table

This is in preparation for adding opcodes that need to modify files
in a process file table, either adding new ones or closing old ones.
If an opcode needs this, it must set REQ_F_NEED_FILES in the request
structure. If work that has this flag set gets punted to async
context, it grabs a reference to the process file table. When the
work is completed, the reference is dropped again.
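
Roughly, the flow the hunks below implement looks like this (a
simplified sketch, eliding error handling; get_files_struct() and
put_files_struct() are the existing kernel helpers):

	/* submission side: REQ_F_NEED_FILES set, work gets punted */
	req->submit.files = get_files_struct(current);

	/* async worker: temporarily assume the submitter's file table */
	old_files = current->files;
	current->files = req->submit.files;

	/* ... issue the request ... */

	/* on completion: restore the worker's table, drop the reference */
	current->files = old_files;
	put_files_struct(req->submit.files);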

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 fs/io_uring.c | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 635856023fdf..ad462237275e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -267,10 +267,11 @@ struct io_ring_ctx {
 struct sqe_submit {
 	const struct io_uring_sqe	*sqe;
 	unsigned short			index;
+	bool				has_user : 1;
+	bool				in_async : 1;
+	bool				needs_fixed_file : 1;
 	u32				sequence;
-	bool				has_user;
-	bool				in_async;
-	bool				needs_fixed_file;
+	struct files_struct		*files;
 };
 
 /*
@@ -323,6 +324,7 @@ struct io_kiocb {
 #define REQ_F_FAIL_LINK		256	/* fail rest of links */
 #define REQ_F_SHADOW_DRAIN	512	/* link-drain shadow req */
 #define REQ_F_TIMEOUT		1024	/* timeout request */
+#define REQ_F_NEED_FILES	2048	/* needs to assume file table */
 	u64			user_data;
 	u32			result;
 	u32			sequence;
@@ -2191,6 +2193,7 @@ static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
 static void io_sq_wq_submit_work(struct work_struct *work)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+	struct files_struct *old_files = NULL;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct mm_struct *cur_mm = NULL;
 	struct async_list *async_list;
@@ -2220,6 +2223,10 @@ static void io_sq_wq_submit_work(struct work_struct *work)
 				set_fs(USER_DS);
 			}
 		}
+		if (s->files && !old_files) {
+			old_files = current->files;
+			current->files = s->files;
+		}
 
 		if (!ret) {
 			s->has_user = cur_mm != NULL;
@@ -2312,6 +2319,11 @@ static void io_sq_wq_submit_work(struct work_struct *work)
 		unuse_mm(cur_mm);
 		mmput(cur_mm);
 	}
+	if (old_files) {
+		struct files_struct *files = current->files;
+		current->files = old_files;
+		put_files_struct(files);
+	}
 }
 
 /*
@@ -2413,6 +2425,8 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 
 			s->sqe = sqe_copy;
 			memcpy(&req->submit, s, sizeof(*s));
+			if (req->flags & REQ_F_NEED_FILES)
+				req->submit.files = get_files_struct(current);
 			list = io_async_list_from_sqe(ctx, s->sqe);
 			if (!io_add_to_prev_work(list, req)) {
 				if (list)
@@ -2633,6 +2647,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
 		s->index = head;
 		s->sqe = &ctx->sq_sqes[head];
 		s->sequence = ctx->cached_sq_head;
+		s->files = NULL;
 		ctx->cached_sq_head++;
 		return true;
 	}
--
2.17.1