[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20241217024047.1091893-5-yukuai1@huaweicloud.com>
Date: Tue, 17 Dec 2024 10:40:47 +0800
From: Yu Kuai <yukuai1@...weicloud.com>
To: axboe@...nel.dk,
akpm@...ux-foundation.org,
ming.lei@...hat.com,
yang.yang@...o.com,
bvanassche@....org,
osandov@...com,
paolo.valente@...aro.org
Cc: linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org,
yukuai3@...wei.com,
yukuai1@...weicloud.com,
yi.zhang@...wei.com,
yangerkun@...wei.com
Subject: [PATCH RFC v2 4/4] block/mq-deadline: introduce min_async_depth
From: Yu Kuai <yukuai3@...wei.com>
min_shallow_depth must be less than or equal to any shallow_depth value, and
it's 1 currently, and this will change the default wake_batch to 1, causing
performance degradation for fast disks with high concurrency. This patch
makes the following changes:
- set the default minimum async_depth to 64, to avoid performance
degradation in the common case. The user can set a lower value if
necessary.
- disable throttling of asynchronous requests by default, to prevent
performance degradation in some special setups. The user must set a value
for async_depth to enable it.
- if async_depth is already set, don't reset it when the user sets a new
nr_requests.
Fixes: 07757588e507 ("block/mq-deadline: Reserve 25% of scheduler tags for synchronous requests")
Signed-off-by: Yu Kuai <yukuai3@...wei.com>
---
block/mq-deadline.c | 19 ++++++++++++++++---
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 1f0d175a941e..9be0a33985ce 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -24,6 +24,16 @@
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
+/*
+ * async_depth is used to reserve scheduler tags for synchronous requests,
+ * and the value will affect sbitmap wake_batch. The default minimal value is 64
+ * because the corresponding wake_batch is 8, and lower wake_batch may affect
+ * IO performance.
+ */
+static unsigned int min_async_depth = 64;
+module_param(min_async_depth, int, 0444);
+MODULE_PARM_DESC(min_async_depth, "The minimal number of tags available for asynchronous requests");
+
/*
* See Documentation/block/deadline-iosched.rst
*/
@@ -513,9 +523,12 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
struct deadline_data *dd = q->elevator->elevator_data;
struct blk_mq_tags *tags = hctx->sched_tags;
- dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
+ if (q->nr_requests > min_async_depth)
+ sbitmap_queue_min_shallow_depth(&tags->bitmap_tags,
+ min_async_depth);
- sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
+ if (q->nr_requests <= dd->async_depth)
+ dd->async_depth = 0;
}
/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
@@ -814,7 +827,7 @@ STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MA
STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
-STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
+STORE_INT(deadline_async_depth_store, &dd->async_depth, min_async_depth, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
--
2.39.2
Powered by blists - more mailing lists