[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <6e384b29-50d2-64bd-0d08-fc0f086c1cbd@huaweicloud.com>
Date: Tue, 10 Dec 2024 09:50:04 +0800
From: Yu Kuai <yukuai1@...weicloud.com>
To: Bart Van Assche <bvanassche@....org>, Yu Kuai <yukuai1@...weicloud.com>,
axboe@...nel.dk, akpm@...ux-foundation.org, yang.yang@...o.com,
ming.lei@...hat.com, osandov@...com, paolo.valente@...aro.org
Cc: linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
yi.zhang@...wei.com, yangerkun@...wei.com, "yukuai (C)" <yukuai3@...wei.com>
Subject: Re: [PATCH RFC 1/3] block/mq-deadline: Revert "block/mq-deadline: Fix
the tag reservation code"
Hi,
在 2024/12/10 2:02, Bart Van Assche 写道:
> This is not correct. dd->async_depth can be modified via sysfs.
How about the following patch to fix min_shallow_depth for deadline?
Thanks,
Kuai
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index a9cf8e19f9d1..040ebb0b192d 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -667,8 +667,7 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
struct blk_mq_tags *tags = hctx->sched_tags;
dd->async_depth = q->nr_requests;
-
- sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
+	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags,
+					dd->async_depth);
}
/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
@@ -1012,6 +1011,47 @@ SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES
+static ssize_t deadline_async_depth_store(struct elevator_queue *e,
+ const char *page, size_t count)
+{
+ struct deadline_data *dd = e->elevator_data;
+ struct request_queue *q = dd->q;
+ struct blk_mq_hw_ctx *hctx;
+ unsigned long i;
+ int v;
+ int ret = kstrtoint(page, 0, &v);
+
+ if (ret < 0)
+ return ret;
+
+ if (v < 1)
+ v = 1;
+ else if (v > dd->q->nr_requests)
+ v = dd->q->nr_requests;
+
+ if (v == dd->async_depth)
+ return count;
+
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
+
+ dd->async_depth = v;
+ if (blk_mq_is_shared_tags(q->tag_set->flags)) {
+		sbitmap_queue_min_shallow_depth(
+				&q->sched_shared_tags->bitmap_tags,
+				dd->async_depth);
+ } else {
+ queue_for_each_hw_ctx(q, hctx, i)
+ sbitmap_queue_min_shallow_depth(
+ &hctx->sched_tags->bitmap_tags,
+ dd->async_depth);
+ }
+
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
+
+ return count;
+}
+
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{ \
@@ -1037,7 +1077,6 @@ STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
 STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
-STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
Powered by blists - more mailing lists