Message-Id: <20250930071111.1218494-5-yukuai1@huaweicloud.com>
Date: Tue, 30 Sep 2025 15:11:08 +0800
From: Yu Kuai <yukuai1@...weicloud.com>
To: axboe@...nel.dk,
bvanassche@....org,
ming.lei@...hat.com,
nilay@...ux.ibm.com
Cc: linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org,
yukuai3@...wei.com,
yukuai1@...weicloud.com,
yi.zhang@...wei.com,
yangerkun@...wei.com,
johnny.chenyi@...wei.com
Subject: [PATCH 4/7] kyber: convert to use request_queue->async_depth
From: Yu Kuai <yukuai3@...wei.com>
Use the generic request_queue->async_depth instead of the scheduler-internal
one: remove kqd->async_depth and its related helpers, and drop the
limit_depth() method, which is no longer needed.
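This relies on the generic async_depth limiting introduced earlier in the
series: the core tag allocation path is now expected to apply
q->async_depth to asynchronous requests, which makes the per-scheduler
limit_depth() hook redundant. A minimal sketch of the assumed core-side
logic (the helper name here is hypothetical, not part of this patch):

	/*
	 * Hedged sketch: a hypothetical core-side helper; the real hook
	 * added earlier in the series may differ.  Async requests are
	 * limited to q->async_depth, which is what the removed
	 * kyber_limit_depth() used to do with kqd->async_depth.
	 */
	static void blk_mq_limit_depth(blk_opf_t opf,
				       struct blk_mq_alloc_data *data)
	{
		if (!blk_mq_sched_sync_request(opf))
			data->shallow_depth = data->q->async_depth;
	}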
Signed-off-by: Yu Kuai <yukuai3@...wei.com>
---
block/kyber-iosched.c | 36 +++---------------------------------
1 file changed, 3 insertions(+), 33 deletions(-)
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index cf243a457175..8bb73e5833a0 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -47,9 +47,8 @@ enum {
* asynchronous requests, we reserve 25% of requests for synchronous
* operations.
*/
- KYBER_ASYNC_PERCENT = 75,
+ KYBER_DEFAULT_ASYNC_PERCENT = 75,
};
-
/*
* Maximum device-wide depth for each scheduling domain.
*
@@ -157,9 +156,6 @@ struct kyber_queue_data {
*/
struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];
- /* Number of allowed async requests. */
- unsigned int async_depth;
-
struct kyber_cpu_latency __percpu *cpu_latency;
/* Timer for stats aggregation and adjusting domain tokens. */
@@ -401,10 +397,7 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
static void kyber_depth_updated(struct request_queue *q)
{
- struct kyber_queue_data *kqd = q->elevator->elevator_data;
-
- kqd->async_depth = q->nr_requests * KYBER_ASYNC_PERCENT / 100U;
- blk_mq_set_min_shallow_depth(q, kqd->async_depth);
+ blk_mq_set_min_shallow_depth(q, q->async_depth);
}
static int kyber_init_sched(struct request_queue *q, struct elevator_queue *eq)
@@ -421,6 +414,7 @@ static int kyber_init_sched(struct request_queue *q, struct elevator_queue *eq)
eq->elevator_data = kqd;
q->elevator = eq;
+ q->async_depth = q->nr_requests * KYBER_DEFAULT_ASYNC_PERCENT / 100;
kyber_depth_updated(q);
return 0;
@@ -538,19 +532,6 @@ static void rq_clear_domain_token(struct kyber_queue_data *kqd,
}
}
-static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
-{
- /*
- * We use the scheduler tags as per-hardware queue queueing tokens.
- * Async requests can be limited at this stage.
- */
- if (!blk_mq_sched_sync_request(opf)) {
- struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
-
- data->shallow_depth = kqd->async_depth;
- }
-}
-
static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
@@ -944,15 +925,6 @@ KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS
-static int kyber_async_depth_show(void *data, struct seq_file *m)
-{
- struct request_queue *q = data;
- struct kyber_queue_data *kqd = q->elevator->elevator_data;
-
- seq_printf(m, "%u\n", kqd->async_depth);
- return 0;
-}
-
static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
struct blk_mq_hw_ctx *hctx = data;
@@ -978,7 +950,6 @@ static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
KYBER_QUEUE_DOMAIN_ATTRS(write),
KYBER_QUEUE_DOMAIN_ATTRS(discard),
KYBER_QUEUE_DOMAIN_ATTRS(other),
- {"async_depth", 0400, kyber_async_depth_show},
{},
};
#undef KYBER_QUEUE_DOMAIN_ATTRS
@@ -1004,7 +975,6 @@ static struct elevator_type kyber_sched = {
.exit_sched = kyber_exit_sched,
.init_hctx = kyber_init_hctx,
.exit_hctx = kyber_exit_hctx,
- .limit_depth = kyber_limit_depth,
.bio_merge = kyber_bio_merge,
.prepare_request = kyber_prepare_request,
.insert_requests = kyber_insert_requests,
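
As a worked example of the default split above: with
KYBER_DEFAULT_ASYNC_PERCENT = 75 and nr_requests = 256, init_sched sets
q->async_depth = 256 * 75 / 100 = 192, leaving 64 tags reserved for
synchronous requests. kyber_depth_updated() then passes that value to
blk_mq_set_min_shallow_depth(), so the sbitmap wake batch is sized for
the smallest shallow depth that will actually be used. Assuming the
earlier patches in this series expose async_depth as a queue attribute,
the split also becomes tunable per queue instead of being hardcoded in
kyber.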
--
2.39.2