Message-ID: <20250321205722.3947901-4-csander@purestorage.com>
Date: Fri, 21 Mar 2025 14:57:22 -0600
From: Caleb Sander Mateos <csander@...estorage.com>
To: Keith Busch <kbusch@...nel.org>,
Jens Axboe <axboe@...nel.dk>,
Christoph Hellwig <hch@....de>,
Sagi Grimberg <sagi@...mberg.me>
Cc: linux-nvme@...ts.infradead.org,
linux-kernel@...r.kernel.org,
Caleb Sander Mateos <csander@...estorage.com>
Subject: [PATCH v2 3/3] nvme/ioctl: move fixed buffer lookup to nvme_uring_cmd_io()
nvme_map_user_request() is called from both nvme_submit_user_cmd() and
nvme_uring_cmd_io(), but its ioucmd fixed-buffer branch only applies to the
nvme_uring_cmd_io() path. Move the fixed buffer lookup into
nvme_uring_cmd_io() and just pass the resulting iov_iter to
nvme_map_user_request().
Signed-off-by: Caleb Sander Mateos <csander@...estorage.com>
---
drivers/nvme/host/ioctl.c | 42 ++++++++++++++++++++++-----------------
1 file changed, 24 insertions(+), 18 deletions(-)
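For reviewers who want the "after" picture without mentally applying the
hunks: with this change nvme_uring_cmd_io() roughly does the following
(a condensed sketch of the diff below, error paths abbreviated), while
nvme_map_user_request() no longer knows about io_uring at all:

	struct iov_iter iter;
	struct iov_iter *map_iter = NULL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		/* fixed buffers are only supported for non-vectored I/O */
		if (vec)
			goto out_free_req;	/* ret = -EINVAL */
		/* resolve the registered buffer into an iov_iter up front */
		ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
				rq_data_dir(req), &iter, ioucmd);
		if (ret < 0)
			goto out_free_req;
		map_iter = &iter;
	}

	/* the mapping helper now just consumes an iov_iter (or NULL) */
	ret = nvme_map_user_request(req, d.addr, d.data_len,
			nvme_to_user_ptr(d.metadata), d.metadata_len,
			map_iter, vec);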
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 843371e6e1e2..98489161e029 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -112,11 +112,11 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 	return req;
 }
 
 static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-		struct io_uring_cmd *ioucmd, unsigned int flags)
+		struct iov_iter *iter, unsigned int flags)
 {
 	struct request_queue *q = req->q;
 	struct nvme_ns *ns = q->queuedata;
 	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
 	bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
@@ -134,27 +134,16 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		if (!nvme_ctrl_meta_sgl_supported(ctrl))
 			dev_warn_once(ctrl->device,
 				      "using unchecked metadata buffer\n");
 	}
 
-	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
-		struct iov_iter iter;
-
-		/* fixedbufs is only for non-vectored io */
-		if (flags & NVME_IOCTL_VEC)
-			return -EINVAL;
-
-		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
-				rq_data_dir(req), &iter, ioucmd);
-		if (ret < 0)
-			return ret;
-		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
-	} else {
+	if (iter)
+		ret = blk_rq_map_user_iov(q, req, NULL, iter, GFP_KERNEL);
+	else
 		ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
 				bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
 				0, rq_data_dir(req));
-	}
 
 	if (ret)
 		return ret;
 
 	bio = req->bio;
@@ -511,13 +500,30 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 	req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
 
 	if (d.addr && d.data_len) {
-		ret = nvme_map_user_request(req, d.addr,
-			d.data_len, nvme_to_user_ptr(d.metadata),
-			d.metadata_len, ioucmd, vec);
+		struct iov_iter iter;
+		struct iov_iter *map_iter = NULL;
+
+		if (ioucmd->flags & IORING_URING_CMD_FIXED) {
+			/* fixedbufs is only for non-vectored io */
+			if (vec) {
+				ret = -EINVAL;
+				goto out_free_req;
+			}
+
+			ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
+					rq_data_dir(req), &iter, ioucmd);
+			if (ret < 0)
+				goto out_free_req;
+
+			map_iter = &iter;
+		}
+		ret = nvme_map_user_request(req, d.addr, d.data_len,
+				nvme_to_user_ptr(d.metadata), d.metadata_len,
+				map_iter, vec);
 		if (ret)
 			goto out_free_req;
 	}
 
 	/* to free bio on completion, as req->bio will be null at that time */
--
2.45.2