Message-ID: <20251121052901.1341976-4-yukuai@fnnas.com>
Date: Fri, 21 Nov 2025 13:28:50 +0800
From: Yu Kuai <yukuai@...as.com>
To: axboe@...nel.dk,
nilay@...ux.ibm.com,
bvanassche@....org,
linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: yukuai@...as.com
Subject: [PATCH v6 3/8] blk-mq: factor out a helper blk_mq_limit_depth()
Factor out the scheduler-tag and depth-limiting logic in
__blk_mq_alloc_requests() into a new helper, blk_mq_limit_depth(). There
are no functional changes; this just makes the code cleaner.

Signed-off-by: Yu Kuai <yukuai@...as.com>
---
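A note for reviewers on the ->limit_depth hook that the new helper ends
up calling: below is a minimal sketch of such a hook, loosely modeled on
mq-deadline's dd_limit_depth() as it looks in the current tree. It is
not part of this series, and the exact field names may differ across
kernel versions.

static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
	struct deadline_data *dd = data->q->elevator->elevator_data;

	/* Never throttle synchronous reads. */
	if (op_is_sync(opf) && !op_is_write(opf))
		return;

	/*
	 * Throttle asynchronous requests and writes so that they cannot
	 * starve the allocation of synchronous requests.
	 */
	data->shallow_depth = dd->async_depth;
}

Since the hook only ever narrows data->shallow_depth, returning early
for flush/passthrough requests in blk_mq_limit_depth() leaves those
requests at the full tag depth.
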
block/blk-mq.c | 62 ++++++++++++++++++++++++++++++--------------------
1 file changed, 37 insertions(+), 25 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f2650c97a75e..6c505ebfab65 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -497,6 +497,42 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
 	return rq_list_pop(data->cached_rqs);
 }
 
+static void blk_mq_limit_depth(struct blk_mq_alloc_data *data)
+{
+	struct elevator_mq_ops *ops;
+
+	/* If no I/O scheduler has been configured, don't limit requests */
+	if (!data->q->elevator) {
+		blk_mq_tag_busy(data->hctx);
+		return;
+	}
+
+	/*
+	 * All requests use scheduler tags when an I/O scheduler is
+	 * enabled for the queue.
+	 */
+	data->rq_flags |= RQF_SCHED_TAGS;
+
+	/*
+	 * Flush/passthrough requests are special and go directly to the
+	 * dispatch list, they are not subject to the async_depth limit.
+	 */
+	if ((data->cmd_flags & REQ_OP_MASK) == REQ_OP_FLUSH ||
+	    blk_op_is_passthrough(data->cmd_flags))
+		return;
+
+	WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
+	data->rq_flags |= RQF_USE_SCHED;
+
+	/*
+	 * By default, sync requests have no limit, and async requests are
+	 * limited to async_depth.
+	 */
+	ops = &data->q->elevator->type->ops;
+	if (ops->limit_depth)
+		ops->limit_depth(data->cmd_flags, data);
+}
+
 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 {
 	struct request_queue *q = data->q;
@@ -515,31 +551,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 	data->ctx = blk_mq_get_ctx(q);
 	data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx);
 
-	if (q->elevator) {
-		/*
-		 * All requests use scheduler tags when an I/O scheduler is
-		 * enabled for the queue.
-		 */
-		data->rq_flags |= RQF_SCHED_TAGS;
-
-		/*
-		 * Flush/passthrough requests are special and go directly to the
-		 * dispatch list.
-		 */
-		if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
-		    !blk_op_is_passthrough(data->cmd_flags)) {
-			struct elevator_mq_ops *ops = &q->elevator->type->ops;
-
-			WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
-
-			data->rq_flags |= RQF_USE_SCHED;
-			if (ops->limit_depth)
-				ops->limit_depth(data->cmd_flags, data);
-		}
-	} else {
-		blk_mq_tag_busy(data->hctx);
-	}
-
+	blk_mq_limit_depth(data);
 	if (data->flags & BLK_MQ_REQ_RESERVED)
 		data->rq_flags |= RQF_RESV;
 
--
2.51.0
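
For context on where the limit takes effect: the shallow depth set by
->limit_depth is consumed when a tag is actually allocated from the
sbitmap. A rough sketch of that consumer, loosely following
__blk_mq_get_tag() in block/blk-mq-tag.c (the exact guards and sbitmap
helper names have shifted across kernel versions):

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	/*
	 * Fair tag sharing only applies when the tags are not owned by
	 * an elevator and the request is not reserved.
	 */
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
	    !hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	/* A shallow depth caps how far the sbitmap search may go. */
	if (data->shallow_depth)
		return sbitmap_queue_get_shallow(bt, data->shallow_depth);

	return __sbitmap_queue_get(bt);
}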