[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230618160738.54385-6-yukuai1@huaweicloud.com>
Date: Mon, 19 Jun 2023 00:07:36 +0800
From: Yu Kuai <yukuai1@...weicloud.com>
To: bvanassche@....org, axboe@...nel.dk
Cc: linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
yukuai3@...wei.com, yukuai1@...weicloud.com, yi.zhang@...wei.com,
yangerkun@...wei.com
Subject: [PATCH RFC 5/7] blk-mq: record the number of failures to get a driver tag while sharing tags
From: Yu Kuai <yukuai3@...wei.com>
Add an atomic counter to record the number of such failures; this counter
will be used to adjust the number of tags assigned to active queues. The
counter decays every second so that it only represents recent I/O
pressure.
Signed-off-by: Yu Kuai <yukuai3@...wei.com>
---
block/blk-mq-tag.c | 22 ++++++++++++++++++++--
include/linux/blkdev.h | 2 ++
2 files changed, 22 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index e0137206c02b..5e5742c7277a 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -45,6 +45,17 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
users);
}
+static void update_tag_sharing_busy(struct tag_sharing *tag_sharing)
+{
+ unsigned int count = atomic_inc_return(&tag_sharing->fail_count);
+ unsigned long last_period = READ_ONCE(tag_sharing->period);
+
+ if (time_after(jiffies, last_period + HZ) &&
+ cmpxchg_relaxed(&tag_sharing->period, last_period, jiffies) ==
+ last_period)
+ atomic_sub(count / 2, &tag_sharing->fail_count);
+}
+
void __blk_mq_driver_tag_busy(struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_tags *tags = hctx->tags;
@@ -57,12 +68,16 @@ void __blk_mq_driver_tag_busy(struct blk_mq_hw_ctx *hctx)
struct request_queue *q = hctx->queue;
if (test_bit(QUEUE_FLAG_HCTX_BUSY, &q->queue_flags) ||
- test_and_set_bit(QUEUE_FLAG_HCTX_BUSY, &q->queue_flags))
+ test_and_set_bit(QUEUE_FLAG_HCTX_BUSY, &q->queue_flags)) {
+ update_tag_sharing_busy(&q->tag_sharing);
return;
+ }
} else {
if (test_bit(BLK_MQ_S_DTAG_BUSY, &hctx->state) ||
- test_and_set_bit(BLK_MQ_S_DTAG_BUSY, &hctx->state))
+ test_and_set_bit(BLK_MQ_S_DTAG_BUSY, &hctx->state)) {
+ update_tag_sharing_busy(&hctx->tag_sharing);
return;
+ }
}
spin_lock_irq(&tags->lock);
@@ -152,8 +167,11 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
}
spin_lock_irq(&tags->lock);
+
list_del_init(&tag_sharing->node);
tag_sharing->available_tags = tags->nr_tags;
+ atomic_set(&tag_sharing->fail_count, 0);
+
__blk_mq_driver_tag_idle(hctx);
WRITE_ONCE(tags->ctl.active_queues, tags->ctl.active_queues - 1);
WRITE_ONCE(tags->ctl.share_queues, tags->ctl.active_queues);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e5111bedfd8d..f3faaf5f6504 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -378,6 +378,8 @@ struct blk_independent_access_ranges {
struct tag_sharing {
struct list_head node;
unsigned int available_tags;
+ atomic_t fail_count;
+ unsigned long period;
};
struct request_queue {
--
2.39.2
Powered by blists - more mailing lists