Message-Id: <20250930071111.1218494-3-yukuai1@huaweicloud.com>
Date: Tue, 30 Sep 2025 15:11:06 +0800
From: Yu Kuai <yukuai1@...weicloud.com>
To: axboe@...nel.dk,
bvanassche@....org,
ming.lei@...hat.com,
nilay@...ux.ibm.com
Cc: linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org,
yukuai3@...wei.com,
yukuai1@...weicloud.com,
yi.zhang@...wei.com,
yangerkun@...wei.com,
johnny.chenyi@...wei.com
Subject: [PATCH 2/7] blk-mq-sched: unify elevators checking for async requests
From: Yu Kuai <yukuai3@...wei.com>
bfq and mq-deadline treat sync writes as async requests and only
reserve tags for sync reads via async_depth; kyber, however, does not
treat sync writes as async requests yet.

Consider the case where there are lots of dirty pages and the user
calls fsync to flush them: sched_tags can be exhausted by sync writes,
and sync reads can get stuck waiting for a tag. Hence make kyber follow
what mq-deadline and bfq do, and unify the async request check across
all elevators.
Signed-off-by: Yu Kuai <yukuai3@...wei.com>
---
block/bfq-iosched.c | 2 +-
block/blk-mq-sched.h | 5 +++++
block/kyber-iosched.c | 2 +-
block/mq-deadline.c | 2 +-
4 files changed, 8 insertions(+), 3 deletions(-)
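
For illustration only (not part of the patch): a minimal standalone
sketch of the classification the new blk_mq_sched_sync_request() helper
performs. The flag values and the op_is_sync()/op_is_write() bodies
below are simplified stand-ins rather than the real encoding from
include/linux/blk_types.h, so the example compiles on its own. The
point it demonstrates: only sync reads keep the full tag depth, while
sync writes (e.g. fsync-driven writeback) are limited to async_depth
together with plain async writes.

/*
 * Simplified stand-ins for the real op/flag encoding; for this sketch
 * only, not the actual kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int blk_opf_t;

#define MOCK_OP_READ	0u
#define MOCK_OP_WRITE	1u
#define MOCK_REQ_SYNC	(1u << 8)

static bool op_is_write(blk_opf_t opf)
{
	return opf & MOCK_OP_WRITE;
}

static bool op_is_sync(blk_opf_t opf)
{
	/* Reads are implicitly sync; writes are sync only if flagged. */
	return !op_is_write(opf) || (opf & MOCK_REQ_SYNC);
}

/* Mirrors the new helper: only sync *reads* get the full depth. */
static bool blk_mq_sched_sync_request(blk_opf_t opf)
{
	return op_is_sync(opf) && !op_is_write(opf);
}

int main(void)
{
	struct { const char *name; blk_opf_t opf; } ops[] = {
		{ "sync read",			MOCK_OP_READ },
		{ "sync write (fsync)",		MOCK_OP_WRITE | MOCK_REQ_SYNC },
		{ "async write (writeback)",	MOCK_OP_WRITE },
	};

	for (unsigned int i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
		printf("%-26s -> %s\n", ops[i].name,
		       blk_mq_sched_sync_request(ops[i].opf) ?
		       "full depth" : "limited to async_depth");
	return 0;
}
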
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 4ffbe4383dd2..704900675949 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -697,7 +697,7 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
unsigned int limit, act_idx;
/* Sync reads have full depth available */
- if (op_is_sync(opf) && !op_is_write(opf))
+ if (blk_mq_sched_sync_request(opf))
limit = data->q->nr_requests;
else
limit = bfqd->async_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 8e21a6b1415d..ae747f9053c7 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -103,4 +103,9 @@ static inline void blk_mq_set_min_shallow_depth(struct request_queue *q,
depth);
}
+static inline bool blk_mq_sched_sync_request(blk_opf_t opf)
+{
+ return op_is_sync(opf) && !op_is_write(opf);
+}
+
#endif
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 18efd6ef2a2b..cf243a457175 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -544,7 +544,7 @@ static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
* We use the scheduler tags as per-hardware queue queueing tokens.
* Async requests can be limited at this stage.
*/
- if (!op_is_sync(opf)) {
+ if (!blk_mq_sched_sync_request(opf)) {
struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
data->shallow_depth = kqd->async_depth;
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 3e741d33142d..592dd853f6e5 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -492,7 +492,7 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
struct deadline_data *dd = data->q->elevator->elevator_data;
/* Do not throttle synchronous reads. */
- if (op_is_sync(opf) && !op_is_write(opf))
+ if (blk_mq_sched_sync_request(opf))
return;
/*
--
2.39.2