Message-Id: <20230111130159.3741753-8-shikemeng@huaweicloud.com>
Date: Wed, 11 Jan 2023 21:01:52 +0800
From: Kemeng Shi <shikemeng@...weicloud.com>
To: hch@....de, axboe@...nel.dk, dwagner@...e.de, hare@...e.de,
ming.lei@...hat.com, linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: john.garry@...wei.com, jack@...e.cz
Subject: [PATCH v3 07/14] blk-mq: make blk_mq_commit_rqs a general function for all commits
1. rename the original blk_mq_commit_rqs to blk_mq_plug_commit_rqs, as
trace_block_unplug is only needed when we dispatch requests from a plug
list, so this case needs its own commit_rqs wrapper. Besides, this patch
adds a queued check and only commits requests if any request was actually
queued, to keep the commit behavior consistent and remove unnecessary
commits.
2. add a new blk_mq_commit_rqs for general commits. The new
blk_mq_commit_rqs does not clear queued, as clearing queued is not
generally wanted.
3. document the rule for the unusual cases which need an explicit
commit_rqs (see the sketch below).
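
For reference, a minimal, hypothetical sketch (not part of the diff below,
and assuming it sits next to the new helper in blk-mq.c) of how a dispatch
loop pairs with blk_mq_commit_rqs(); example_dispatch() and the direct
->queue_rq() call are illustrative stand-ins only:

	#include <linux/blk-mq.h>
	#include <linux/blkdev.h>
	#include <linux/list.h>

	/*
	 * Illustrative only: dispatch a list of requests directly and show
	 * where an explicit commit is needed.
	 */
	static void example_dispatch(struct blk_mq_hw_ctx *hctx,
				     struct list_head *list)
	{
		int queued = 0;

		while (!list_empty(list)) {
			struct request *rq = list_first_entry(list,
					struct request, queuelist);
			struct blk_mq_queue_data bd;

			list_del_init(&rq->queuelist);
			bd.rq = rq;
			bd.last = list_empty(list);

			if (hctx->queue->mq_ops->queue_rq(hctx, &bd) !=
			    BLK_STS_OK) {
				/*
				 * Unusual case 2): the last queue attempt
				 * failed, so the driver never saw
				 * bd.last == true for this batch.  Commit
				 * explicitly so already-queued requests are
				 * kicked off; blk_mq_commit_rqs() is a no-op
				 * when queued is 0 or ->commit_rqs is not
				 * implemented.
				 */
				blk_mq_commit_rqs(hctx, queued);
				return;
			}
			queued++;
		}
		/* Everything was queued and the driver saw bd.last set. */
	}

The explicit commit in the error path covers unusual case 2); the normal
path relies on bd.last alone, so no commit is needed there.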
Suggested-by: Christoph Hellwig <hch@....de>
Signed-off-by: Kemeng Shi <shikemeng@...weicloud.com>
---
block/blk-mq.c | 38 ++++++++++++++++++++++++++------------
1 file changed, 26 insertions(+), 12 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c6cc3feb3b84..98f6003474f2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2007,6 +2007,29 @@ static void blk_mq_release_budgets(struct request_queue *q,
}
}
+/* blk_mq_commit_rqs and blk_mq_plug_commit_rqs notify the driver, using
+ * bd->last, that there are no more requests. (See the comment for
+ * commit_rqs in struct blk_mq_ops for details.)
+ * Attention: we should call this explicitly in the unusual cases where we
+ * 1) did not queue everything initially scheduled to queue
+ * 2) the last attempt to queue a request failed
+ */
+static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued)
+{
+ if (hctx->queue->mq_ops->commit_rqs && queued) {
+ hctx->queue->mq_ops->commit_rqs(hctx);
+ }
+}
+
+static void blk_mq_plug_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued)
+{
+ if (hctx->queue->mq_ops->commit_rqs && *queued) {
+ trace_block_unplug(hctx->queue, *queued, true);
+ hctx->queue->mq_ops->commit_rqs(hctx);
+ }
+ *queued = 0;
+}
+
/*
* Returns true if we did some work AND can potentially do more.
*/
@@ -2555,15 +2578,6 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
spin_unlock(&ctx->lock);
}
-static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued)
-{
- if (hctx->queue->mq_ops->commit_rqs) {
- trace_block_unplug(hctx->queue, *queued, true);
- hctx->queue->mq_ops->commit_rqs(hctx);
- }
- *queued = 0;
-}
-
static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
unsigned int nr_segs)
{
@@ -2700,7 +2714,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
if (hctx != rq->mq_hctx) {
if (hctx)
- blk_mq_commit_rqs(hctx, &queued);
+ blk_mq_plug_commit_rqs(hctx, &queued);
hctx = rq->mq_hctx;
}
@@ -2712,7 +2726,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
case BLK_STS_RESOURCE:
case BLK_STS_DEV_RESOURCE:
blk_mq_request_bypass_insert(rq, false, true);
- blk_mq_commit_rqs(hctx, &queued);
+ blk_mq_plug_commit_rqs(hctx, &queued);
return;
default:
blk_mq_end_request(rq, ret);
@@ -2726,7 +2740,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
* there was more coming, but that turned out to be a lie.
*/
if (errors)
- blk_mq_commit_rqs(hctx, &queued);
+ blk_mq_plug_commit_rqs(hctx, &queued);
}
static void __blk_mq_flush_plug_list(struct request_queue *q,
--
2.30.0