Message-Id: <20250415-ublk_task_per_io-v4-4-54210b91a46f@purestorage.com>
Date: Tue, 15 Apr 2025 18:59:40 -0600
From: Uday Shankar <ushankar@...estorage.com>
To: Ming Lei <ming.lei@...hat.com>, Jens Axboe <axboe@...nel.dk>,
	Caleb Sander Mateos <csander@...estorage.com>
Cc: linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
	Uday Shankar <ushankar@...estorage.com>
Subject: [PATCH v4 4/4] ublk: mark ublk_queue as const for
 ublk_handle_need_get_data

We now allow multiple tasks to operate on I/Os belonging to the same
queue concurrently. This means that any writes to ublk_queue in the I/O
path are potential sources of data races. Prevent these by marking
ublk_queue pointers as const in ublk_handle_need_get_data, so the
compiler rejects writes through them. Also
move a bit more of the NEED_GET_DATA-specific logic into
ublk_handle_need_get_data, to make the pattern in __ublk_ch_uring_cmd
more uniform.
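
As a minimal, self-contained sketch of what the const qualifier buys
here (not part of the patch; the struct is trimmed down from
ublk_drv.c, and the flag value and include are illustrative):

	#include <stdbool.h>

	/* Simplified from ublk_drv.c; only two fields kept. */
	struct ublk_queue {
		unsigned long flags;
		bool canceling;
	};

	#define UBLK_F_NEED_GET_DATA (1UL << 2)

	/* Reads through the const-qualified pointer are fine... */
	static bool ublk_need_get_data(const struct ublk_queue *ubq)
	{
		/*
		 * ...but a store such as "ubq->canceling = true" is now
		 * rejected at compile time ("assignment of member
		 * 'canceling' in read-only object"), so a racy write
		 * cannot slip into this path unnoticed.
		 */
		return ubq->flags & UBLK_F_NEED_GET_DATA;
	}

With the queue pointer const-qualified throughout the NEED_GET_DATA
path, any new store to shared ublk_queue state has to be introduced
deliberately, with the cast or field change visible in review.
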
Suggested-by: Ming Lei <ming.lei@...hat.com>
Signed-off-by: Uday Shankar <ushankar@...estorage.com>
---
 drivers/block/ublk_drv.c | 33 ++++++++++++++++++++-------------
 1 file changed, 20 insertions(+), 13 deletions(-)

diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index e2cb54895481aebaa91ab23ba05cf26a950a642f..c8ce9349ca280b8b16040a1242a62b895ee01b5d 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1291,7 +1291,7 @@ static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
 	ublk_dispatch_req(ubq, pdu->req, issue_flags);
 }
 
-static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
+static void ublk_queue_cmd(const struct ublk_queue *ubq, struct request *rq)
 {
 	struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
@@ -1813,15 +1813,6 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 	mutex_unlock(&ub->mutex);
 }
 
-static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
-				      int tag)
-{
-	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
-	struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
-
-	ublk_queue_cmd(ubq, req);
-}
-
 static inline int ublk_check_cmd_op(u32 cmd_op)
 {
 	u32 ioc_type = _IOC_TYPE(cmd_op);
@@ -1933,6 +1924,21 @@ static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
 	return -EIOCBQUEUED;
 }
 
+static int ublk_handle_need_get_data(const struct ublk_queue *ubq,
+				     struct ublk_io *io,
+				     struct io_uring_cmd *cmd,
+				     const struct ublksrv_io_cmd *ub_cmd,
+				     struct request *req)
+{
+	if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+		return -EINVAL;
+
+	ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
+	ublk_queue_cmd(ubq, req);
+
+	return -EIOCBQUEUED;
+}
+
 static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
 			       unsigned int issue_flags,
 			       const struct ublksrv_io_cmd *ub_cmd)
@@ -2025,10 +2031,11 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
 			goto out;
 		break;
 	case UBLK_IO_NEED_GET_DATA:
-		if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+		ret = ublk_handle_need_get_data(
+			ubq, io, cmd, ub_cmd,
+			blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag));
+		if (ret != -EIOCBQUEUED)
 			goto out;
-		ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
-		ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
 		break;
 	default:
 		goto out;
--
2.34.1