[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250430225234.2676781-10-csander@purestorage.com>
Date: Wed, 30 Apr 2025 16:52:34 -0600
From: Caleb Sander Mateos <csander@...estorage.com>
To: Ming Lei <ming.lei@...hat.com>,
Jens Axboe <axboe@...nel.dk>
Cc: Uday Shankar <ushankar@...estorage.com>,
linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org,
Caleb Sander Mateos <csander@...estorage.com>
Subject: [PATCH v2 9/9] ublk: store request pointer in ublk_io
A ublk_io is converted to a request in several places in the I/O path by
using blk_mq_tag_to_rq() to look up the (qid, tag) on the ublk device's
tagset. This involves a bunch of dereferences and a tag bounds check.
To make this conversion cheaper, store the request pointer in ublk_io.
Overlap this storage with the io_uring_cmd pointer. This is safe because
the io_uring_cmd pointer is only valid if UBLK_IO_FLAG_ACTIVE is set on
the ublk_io, the request pointer is only valid if UBLK_IO_FLAG_OWNED_BY_SRV
is set, and these flags are mutually exclusive.
Suggested-by: Ming Lei <ming.lei@...hat.com>
Signed-off-by: Caleb Sander Mateos <csander@...estorage.com>
Reviewed-by: Ming Lei <ming.lei@...hat.com>
---
drivers/block/ublk_drv.c | 43 ++++++++++++++++++++++------------------
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 97c61c0bf964..02e52b066318 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -140,11 +140,16 @@ struct ublk_io {
/* userspace buffer address from io cmd */
__u64 addr;
unsigned int flags;
int res;
- struct io_uring_cmd *cmd;
+ union {
+ /* valid if UBLK_IO_FLAG_ACTIVE is set */
+ struct io_uring_cmd *cmd;
+ /* valid if UBLK_IO_FLAG_OWNED_BY_SRV is set */
+ struct request *req;
+ };
};
struct ublk_queue {
int q_id;
int q_depth;
@@ -1122,24 +1127,29 @@ static void ublk_complete_rq(struct kref *ref)
struct request *req = blk_mq_rq_from_pdu(data);
__ublk_complete_rq(req);
}
-static void ublk_complete_io_cmd(struct ublk_io *io, int res,
- unsigned issue_flags)
+static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
+ int res, unsigned issue_flags)
{
+ /* read cmd first because req will overwrite it */
+ struct io_uring_cmd *cmd = io->cmd;
+
/* mark this cmd owned by ublksrv */
io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
/*
* clear ACTIVE since we are done with this sqe/cmd slot
* We can only accept io cmd in case of being not active.
*/
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
+ io->req = req;
+
/* tell ublksrv one io request is coming */
- io_uring_cmd_done(io->cmd, res, 0, issue_flags);
+ io_uring_cmd_done(cmd, res, 0, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
static inline void __ublk_abort_rq(struct ublk_queue *ubq,
@@ -1213,19 +1223,19 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
* and notify it.
*/
io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
pr_devel("%s: need get data. qid %d tag %d io_flags %x\n",
__func__, ubq->q_id, req->tag, io->flags);
- ublk_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA,
+ ublk_complete_io_cmd(io, req, UBLK_IO_RES_NEED_GET_DATA,
issue_flags);
return;
}
if (!ublk_start_io(ubq, req, io))
return;
- ublk_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
+ ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
}
static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
@@ -1609,16 +1619,12 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
int i;
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
- if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) {
- struct request *rq;
-
- rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
- __ublk_fail_req(ubq, io, rq);
- }
+ if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
+ __ublk_fail_req(ubq, io, io->req);
}
}
/* Must be called when queue is frozen */
static void ublk_mark_queue_canceling(struct ublk_queue *ubq)
@@ -1988,16 +1994,16 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
struct ublk_io *io, struct io_uring_cmd *cmd,
const struct ublksrv_io_cmd *ub_cmd)
{
- struct blk_mq_tags *tags = ubq->dev->tag_set.tags[ub_cmd->q_id];
- struct request *req = blk_mq_tag_to_rq(tags, ub_cmd->tag);
+ struct request *req;
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
return -EINVAL;
+ req = io->req;
if (ublk_need_map_io(ubq)) {
/*
* COMMIT_AND_FETCH_REQ has to provide IO buffer if
* NEED GET DATA is not enabled or it is Read IO.
*/
@@ -2025,13 +2031,14 @@ static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
ublk_put_req_ref(ubq, req);
return 0;
}
-static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
- struct request *req)
+static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io)
{
+ struct request *req = io->req;
+
/*
* We have handled UBLK_IO_NEED_GET_DATA command,
* so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
* do the copy work.
*/
@@ -2053,11 +2060,10 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
struct ublk_queue *ubq;
struct ublk_io *io;
u32 cmd_op = cmd->cmd_op;
unsigned tag = ub_cmd->tag;
int ret = -EINVAL;
- struct request *req;
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
ub_cmd->result);
@@ -2109,12 +2115,11 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
break;
case UBLK_IO_NEED_GET_DATA:
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
goto out;
io->addr = ub_cmd->addr;
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
- if (!ublk_get_data(ubq, io, req))
+ if (!ublk_get_data(ubq, io))
return -EIOCBQUEUED;
return UBLK_IO_RES_OK;
default:
goto out;
--
2.45.2
Powered by blists - more mailing lists