Message-ID: <20230823090441.3986631-4-chengming.zhou@linux.dev>
Date: Wed, 23 Aug 2023 17:04:38 +0800
From: chengming.zhou@...ux.dev
To: axboe@...nel.dk, ming.lei@...hat.com, bvanassche@....org,
hch@....de
Cc: linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
zhouchengming@...edance.com
Subject: [PATCH v2 3/6] blk-mq-tag: remove bt_for_each()
From: Chengming Zhou <zhouchengming@...edance.com>
Change the only user of bt_for_each() to use the previously introduced
__blk_mq_tagset_busy_iter(), which can take a request_queue filter when
iterating. Since blk_mq_queue_tag_busy_iter() is only used to iterate
over started requests, and __blk_mq_tagset_busy_iter() already applies
the BT_TAG_ITER_STARTED filter so that only started requests are
visited, there should be no problem.
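
For reference, a rough sketch of the per-request queue filtering that
__blk_mq_tagset_busy_iter() is assumed to perform; the helper name and
placement are illustrative only, mirroring the rq->q check done by the
removed bt_iter():

	/*
	 * Illustrative sketch, not the actual code: skip requests that
	 * belong to a different request_queue sharing the same tag set,
	 * just like the rq->q == q check in the removed bt_iter().
	 */
	static bool bt_tags_iter_queue_match(struct request *rq,
					     struct request_queue *q)
	{
		/* No queue filter given: visit requests of all queues. */
		if (!q)
			return true;

		/* Only visit requests belonging to the given queue. */
		return rq->q == q;
	}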
The only potential disadvantage I can see is that we lose the
blk_mq_hw_queue_mapped() filter, but that should not matter in
practice. Unmapped hctxs used to occur when hardware queues were
dynamically mapped and unmapped on CPU hotplug, but we don't do that
anymore: all possible CPUs are always mapped now. So an unmapped hctx
should only happen if something is wrong with the driver's tagset map
settings.
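
For context, the dropped filter only checks whether the hctx has any
software queues and tags mapped; if I recall correctly it is essentially:

	/* From block/blk-mq.h (paraphrased from memory): */
	static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
	{
		return hctx->nr_ctx && hctx->tags;
	}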
Signed-off-by: Chengming Zhou <zhouchengming@...edance.com>
---
block/blk-mq-tag.c | 99 +---------------------------------------------
1 file changed, 1 insertion(+), 98 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 75b33ae6acef..c497d634cfdb 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -241,14 +241,6 @@ void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
tag_array, nr_tags);
}
-struct bt_iter_data {
- struct blk_mq_hw_ctx *hctx;
- struct request_queue *q;
- busy_tag_iter_fn *fn;
- void *data;
- bool reserved;
-};
-
static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
unsigned int bitnr)
{
@@ -263,67 +255,6 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
return rq;
}
-static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
-{
- struct bt_iter_data *iter_data = data;
- struct blk_mq_hw_ctx *hctx = iter_data->hctx;
- struct request_queue *q = iter_data->q;
- struct blk_mq_tag_set *set = q->tag_set;
- struct blk_mq_tags *tags;
- struct request *rq;
- bool ret = true;
-
- if (blk_mq_is_shared_tags(set->flags))
- tags = set->shared_tags;
- else
- tags = hctx->tags;
-
- if (!iter_data->reserved)
- bitnr += tags->nr_reserved_tags;
- /*
- * We can hit rq == NULL here, because the tagging functions
- * test and set the bit before assigning ->rqs[].
- */
- rq = blk_mq_find_and_get_req(tags, bitnr);
- if (!rq)
- return true;
-
- if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
- ret = iter_data->fn(rq, iter_data->data);
- blk_mq_put_rq_ref(rq);
- return ret;
-}
-
-/**
- * bt_for_each - iterate over the requests associated with a hardware queue
- * @hctx: Hardware queue to examine.
- * @q: Request queue to examine.
- * @bt: sbitmap to examine. This is either the breserved_tags member
- * or the bitmap_tags member of struct blk_mq_tags.
- * @fn: Pointer to the function that will be called for each request
- * associated with @hctx that has been assigned a driver tag.
- * @fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
- * where rq is a pointer to a request. Return true to continue
- * iterating tags, false to stop.
- * @data: Will be passed as third argument to @fn.
- * @reserved: Indicates whether @bt is the breserved_tags member or the
- * bitmap_tags member of struct blk_mq_tags.
- */
-static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
- struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
- void *data, bool reserved)
-{
- struct bt_iter_data iter_data = {
- .hctx = hctx,
- .fn = fn,
- .data = data,
- .reserved = reserved,
- .q = q,
- };
-
- sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
-}
-
struct bt_tags_iter_data {
struct blk_mq_tags *tags;
busy_tag_iter_fn *fn;
@@ -519,35 +450,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
if (!percpu_ref_tryget(&q->q_usage_counter))
return;
- if (blk_mq_is_shared_tags(q->tag_set->flags)) {
- struct blk_mq_tags *tags = q->tag_set->shared_tags;
- struct sbitmap_queue *bresv = &tags->breserved_tags;
- struct sbitmap_queue *btags = &tags->bitmap_tags;
-
- if (tags->nr_reserved_tags)
- bt_for_each(NULL, q, bresv, fn, priv, true);
- bt_for_each(NULL, q, btags, fn, priv, false);
- } else {
- struct blk_mq_hw_ctx *hctx;
- unsigned long i;
-
- queue_for_each_hw_ctx(q, hctx, i) {
- struct blk_mq_tags *tags = hctx->tags;
- struct sbitmap_queue *bresv = &tags->breserved_tags;
- struct sbitmap_queue *btags = &tags->bitmap_tags;
-
- /*
- * If no software queues are currently mapped to this
- * hardware queue, there's nothing to check
- */
- if (!blk_mq_hw_queue_mapped(hctx))
- continue;
-
- if (tags->nr_reserved_tags)
- bt_for_each(hctx, q, bresv, fn, priv, true);
- bt_for_each(hctx, q, btags, fn, priv, false);
- }
- }
+ __blk_mq_tagset_busy_iter(q->tag_set, fn, priv, q);
blk_queue_exit(q);
}
--
2.41.0