Message-ID: <20250918014953.297897-18-csander@purestorage.com>
Date: Wed, 17 Sep 2025 19:49:53 -0600
From: Caleb Sander Mateos <csander@...estorage.com>
To: Ming Lei <ming.lei@...hat.com>,
Jens Axboe <axboe@...nel.dk>
Cc: linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org,
Caleb Sander Mateos <csander@...estorage.com>
Subject: [PATCH 17/17] ublk: don't access ublk_queue in ublk_unmap_io()
For ublk servers with many ublk queues, accessing the ublk_queue in
ublk_unmap_io() is a frequent cache miss. Pass to __ublk_complete_rq()
whether the ublk server's data buffer needs to be copied to the request.
In the callers __ublk_fail_req() and ublk_ch_uring_cmd_local(), derive
it from the ublk_device's flags instead, as those have just been read.
In ublk_put_req_ref(), pass false since all the features that require
reference counting disable copying of the data buffer upon completion.
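For reference, a rough sketch of what the device-level helper could
look like; the exact flag mask is an assumption here, mirroring the
queue-level ublk_need_map_io() check but reading the feature flags from
the ublk_device rather than dereferencing the ublk_queue:

static inline bool ublk_dev_need_map_io(const struct ublk_device *ub)
{
	/*
	 * Sketch only: as with ublk_need_map_io(ubq), mapping is needed
	 * unless the server provides the data buffer itself (e.g.
	 * UBLK_F_USER_COPY and related feature flags). The flags are
	 * read from the ublk_device, which these callers have already
	 * pulled into cache.
	 */
	return !(ub->dev_info.flags & UBLK_F_USER_COPY);
}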
Signed-off-by: Caleb Sander Mateos <csander@...estorage.com>
---
drivers/block/ublk_drv.c | 24 ++++++++++++++----------
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index a677eca1ee86..5ab7ff5f03f4 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -527,11 +527,12 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
return BLK_STS_NOTSUPP;
}
#endif
-static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io);
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map);
static dev_t ublk_chr_devt;
static const struct class ublk_chr_class = {
.name = "ublk-char",
};
@@ -735,12 +736,15 @@ static inline bool ublk_get_req_ref(struct ublk_io *io)
return refcount_inc_not_zero(&io->ref);
}
static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req)
{
- if (refcount_dec_and_test(&io->ref))
- __ublk_complete_rq(req, io);
+ if (!refcount_dec_and_test(&io->ref))
+ return;
+
+ /* ublk_need_map_io() and ublk_need_req_ref() are mutually exclusive */
+ __ublk_complete_rq(req, io, false);
}
static inline bool ublk_sub_req_ref(struct ublk_io *io)
{
unsigned sub_refs = UBLK_REFCOUNT_INIT - io->task_registered_buffers;
@@ -1046,17 +1050,17 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
}
-static int ublk_unmap_io(const struct ublk_queue *ubq,
+static int ublk_unmap_io(bool need_map,
const struct request *req,
const struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (!ublk_need_map_io(ubq))
+ if (!need_map)
return rq_bytes;
if (ublk_need_unmap_req(req)) {
struct iov_iter iter;
const int dir = ITER_SOURCE;
@@ -1144,13 +1148,13 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
{
return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
}
/* todo: handle partial completion */
-static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io)
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map)
{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
unsigned int unmapped_bytes;
blk_status_t res = BLK_STS_OK;
/* failed read IO if nothing is read */
if (!io->res && req_op(req) == REQ_OP_READ)
@@ -1170,11 +1174,11 @@ static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io)
if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE &&
req_op(req) != REQ_OP_DRV_IN)
goto exit;
/* for READ request, writing data in iod->addr to rq buffers */
- unmapped_bytes = ublk_unmap_io(ubq, req, io);
+ unmapped_bytes = ublk_unmap_io(need_map, req, io);
/*
* Extremely impossible since we got data filled in just before
*
* Re-read simply for this unlikely case.
@@ -1747,11 +1751,11 @@ static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io,
if (ublk_nosrv_should_reissue_outstanding(ub))
blk_mq_requeue_request(req, false);
else {
io->res = -EIO;
- __ublk_complete_rq(req, io);
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
}
}
/*
* Called from ublk char device release handler, when any uring_cmd is
@@ -2392,11 +2396,11 @@ static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
if (buf_idx != UBLK_INVALID_BUF_IDX)
io_buffer_unregister_bvec(cmd, buf_idx, issue_flags);
if (req_op(req) == REQ_OP_ZONE_APPEND)
req->__sector = addr;
if (compl)
- __ublk_complete_rq(req, io);
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
if (ret)
goto out;
break;
case UBLK_IO_NEED_GET_DATA:
--
2.45.2