[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250409024955.3626275-1-csander@purestorage.com>
Date: Tue, 8 Apr 2025 20:49:54 -0600
From: Caleb Sander Mateos <csander@...estorage.com>
To: Ming Lei <ming.lei@...hat.com>,
Jens Axboe <axboe@...nel.dk>
Cc: Caleb Sander Mateos <csander@...estorage.com>,
linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH] ublk: skip blk_mq_tag_to_rq() bounds check
The ublk driver calls blk_mq_tag_to_rq() in several places.
blk_mq_tag_to_rq() tolerates an invalid tag for the tagset, checking it
against the number of tags and returning NULL if it is out of bounds.
But all the calls from the ublk driver have already verified the tag
against the ublk queue's queue depth. In ublk_commit_completion(),
ublk_handle_need_get_data(), and case UBLK_IO_COMMIT_AND_FETCH_REQ, the
tag has already been checked in __ublk_ch_uring_cmd(). In
ublk_abort_queue(), the loop bounds the tag by the queue depth. In
__ublk_check_and_get_req(), the tag has already been checked in
__ublk_ch_uring_cmd(), in the case of ublk_register_io_buf(), or in
ublk_check_and_get_req().
Since this bounds check is therefore redundant, just index the tagset's rqs
array directly in the ublk driver. Also convert the tag variables to unsigned,
matching the type of blk_mq_tag_to_rq()'s tag parameter.
Signed-off-by: Caleb Sander Mateos <csander@...estorage.com>
---
drivers/block/ublk_drv.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 2fd05c1bd30b..5b07329f5197 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -210,11 +210,11 @@ struct ublk_params_header {
};
static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq);
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- struct ublk_queue *ubq, int tag, size_t offset);
+ struct ublk_queue *ubq, unsigned tag, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
int tag);
static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
{
@@ -1515,11 +1515,11 @@ static void ublk_commit_completion(struct ublk_device *ub,
/* now this cmd slot is owned by nbd driver */
io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
io->res = ub_cmd->result;
/* find the io request and complete */
- req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
+ req = ub->tag_set.tags[qid]->rqs[tag];
if (WARN_ON_ONCE(unlikely(!req)))
return;
if (req_op(req) == REQ_OP_ZONE_APPEND)
req->__sector = ub_cmd->zone_append_lba;
@@ -1533,11 +1533,11 @@ static void ublk_commit_completion(struct ublk_device *ub,
* blk-mq queue, so we are called exclusively with blk-mq and ubq_daemon
* context, so everything is serialized.
*/
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
{
- int i;
+ unsigned i;
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
@@ -1545,11 +1545,11 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
/*
* Either we fail the request or ublk_rq_task_work_cb
* will do it
*/
- rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
+ rq = ub->tag_set.tags[ubq->q_id]->rqs[i];
if (rq && blk_mq_request_started(rq)) {
io->flags |= UBLK_IO_FLAG_ABORTED;
__ublk_fail_req(ubq, io, rq);
}
}
@@ -1824,14 +1824,14 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
complete_all(&ub->completion);
mutex_unlock(&ub->mutex);
}
static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
- int tag)
+ unsigned tag)
{
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
- struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
+ struct request *req = ub->tag_set.tags[q_id]->rqs[tag];
ublk_queue_cmd(ubq, req);
}
static inline int ublk_check_cmd_op(u32 cmd_op)
@@ -1989,11 +1989,11 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
ublk_mark_io_ready(ub, ubq);
break;
case UBLK_IO_COMMIT_AND_FETCH_REQ:
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
+ req = ub->tag_set.tags[ub_cmd->q_id]->rqs[tag];
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
goto out;
if (ublk_need_map_io(ubq)) {
@@ -2033,18 +2033,18 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
__func__, cmd_op, tag, ret, io->flags);
return ret;
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- struct ublk_queue *ubq, int tag, size_t offset)
+ struct ublk_queue *ubq, unsigned tag, size_t offset)
{
struct request *req;
if (!ublk_need_req_ref(ubq))
return NULL;
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
+ req = ub->tag_set.tags[ubq->q_id]->rqs[tag];
if (!req)
return NULL;
if (!ublk_get_req_ref(ubq, req))
return NULL;
--
2.45.2
Powered by blists - more mailing lists