Message-ID: <20250321205722.3947901-3-csander@purestorage.com>
Date: Fri, 21 Mar 2025 14:57:21 -0600
From: Caleb Sander Mateos <csander@...estorage.com>
To: Keith Busch <kbusch@...nel.org>,
	Jens Axboe <axboe@...nel.dk>,
	Christoph Hellwig <hch@....de>,
	Sagi Grimberg <sagi@...mberg.me>
Cc: linux-nvme@...ts.infradead.org,
	linux-kernel@...r.kernel.org,
	Caleb Sander Mateos <csander@...estorage.com>
Subject: [PATCH v2 2/3] nvme/ioctl: don't call blk_mq_free_request() in nvme_map_user_request()

The callers of nvme_map_user_request() (nvme_submit_user_cmd() and
nvme_uring_cmd_io()) allocate the request, so have them free it if
nvme_map_user_request() fails.

Signed-off-by: Caleb Sander Mateos <csander@...estorage.com>
---
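The ownership rule this patch adopts can be shown with a small
self-contained userspace sketch (map_request() and this struct request
are hypothetical stand-ins, not the kernel APIs): the function that
allocates a resource is also the one that frees it when a later helper
fails, so the helper never frees memory it did not allocate and a
double free is ruled out by construction.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the request being prepared. */
struct request {
	char *buf;
};

/*
 * Hypothetical stand-in for nvme_map_user_request(): on failure it
 * returns an error and leaves *req untouched, mirroring the rule this
 * patch establishes (the callee never frees the caller's request).
 */
static int map_request(struct request *req, size_t len)
{
	req->buf = malloc(len);
	if (!req->buf)
		return -1;
	return 0;
}

int main(void)
{
	int ret = 1;
	struct request *req = malloc(sizeof(*req));	/* caller allocates */

	if (!req)
		return 1;
	if (map_request(req, 4096))
		goto out_free_req;	/* caller frees on the helper's failure */
	printf("mapped 4096 bytes\n");
	free(req->buf);
	ret = 0;
out_free_req:
	free(req);	/* freed by the same function that allocated it */
	return ret;
}

This mirrors the out_free_req labels added to nvme_submit_user_cmd()
and nvme_uring_cmd_io() below, where blk_mq_free_request() now pairs
with the allocation in the caller instead of hiding inside
nvme_map_user_request().
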
 drivers/nvme/host/ioctl.c | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 6c27d72e8cee..843371e6e1e2 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -126,40 +126,38 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 	int ret;
 
 	if (!nvme_ctrl_sgl_supported(ctrl))
 		dev_warn_once(ctrl->device, "using unchecked data buffer\n");
 	if (has_metadata) {
-		if (!supports_metadata) {
-			ret = -EINVAL;
-			goto out;
-		}
+		if (!supports_metadata)
+			return -EINVAL;
+
 		if (!nvme_ctrl_meta_sgl_supported(ctrl))
 			dev_warn_once(ctrl->device,
 				      "using unchecked metadata buffer\n");
 	}
 
 	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
 		struct iov_iter iter;
 
 		/* fixedbufs is only for non-vectored io */
-		if (flags & NVME_IOCTL_VEC) {
-			ret = -EINVAL;
-			goto out;
-		}
+		if (flags & NVME_IOCTL_VEC)
+			return -EINVAL;
+
 		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
 				rq_data_dir(req), &iter, ioucmd);
 		if (ret < 0)
-			goto out;
+			return ret;
 		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
 	} else {
 		ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
 				bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
 				0, rq_data_dir(req));
 	}
 
 	if (ret)
-		goto out;
+		return ret;
 
 	bio = req->bio;
 	if (bdev)
 		bio_set_dev(bio, bdev);
 
@@ -172,12 +170,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 	return ret;
 
 out_unmap:
 	if (bio)
 		blk_rq_unmap_user(bio);
-out:
-	blk_mq_free_request(req);
 	return ret;
 }
 
 static int nvme_submit_user_cmd(struct request_queue *q,
 		struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
@@ -198,11 +194,11 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	req->timeout = timeout;
 	if (ubuffer && bufflen) {
 		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
 				meta_len, NULL, flags);
 		if (ret)
-			return ret;
+			goto out_free_req;
 	}
 
 	bio = req->bio;
 	ctrl = nvme_req(req)->ctrl;
 
@@ -210,15 +206,16 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	ret = nvme_execute_rq(req, false);
 	if (result)
 		*result = le64_to_cpu(nvme_req(req)->result.u64);
 	if (bio)
 		blk_rq_unmap_user(bio);
-	blk_mq_free_request(req);
 
 	if (effects)
 		nvme_passthru_end(ctrl, ns, effects, cmd, ret);
 
+out_free_req:
+	blk_mq_free_request(req);
 	return ret;
 }
 
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
@@ -518,20 +515,24 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	if (d.addr && d.data_len) {
 		ret = nvme_map_user_request(req, d.addr,
 			d.data_len, nvme_to_user_ptr(d.metadata),
 			d.metadata_len, ioucmd, vec);
 		if (ret)
-			return ret;
+			goto out_free_req;
 	}
 
 	/* to free bio on completion, as req->bio will be null at that time */
 	pdu->bio = req->bio;
 	pdu->req = req;
 	req->end_io_data = ioucmd;
 	req->end_io = nvme_uring_cmd_end_io;
 	blk_execute_rq_nowait(req, false);
 	return -EIOCBQUEUED;
+
+out_free_req:
+	blk_mq_free_request(req);
+	return ret;
 }
 
 static bool is_ctrl_ioctl(unsigned int cmd)
 {
 	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
--
2.45.2