Message-Id: <20241221093710.926309-8-yukuai1@huaweicloud.com>
Date: Sat, 21 Dec 2024 17:37:10 +0800
From: Yu Kuai <yukuai1@...weicloud.com>
To: axboe@...nel.dk,
	akpm@...ux-foundation.org,
	ming.lei@...hat.com,
	yang.yang@...o.com,
	yukuai3@...wei.com,
	bvanassche@....org
Cc: linux-block@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	yukuai1@...weicloud.com,
	yi.zhang@...wei.com,
	yangerkun@...wei.com
Subject: [PATCH RFC v3 7/7] block/kyber-iosched: switch to use queue async_depth

From: Yu Kuai <yukuai3@...wei.com>

Use the request_queue's async_depth field instead of kyber's own copy to
make the code cleaner.

Note that for kyber, async_depth is still always 75% of nr_requests, and
the user can't set a new value for now.

Signed-off-by: Yu Kuai <yukuai3@...wei.com>
---
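For reviewers, a minimal standalone sketch (plain userspace C, not kernel
code and not part of this patch) of the depth kyber keeps deriving here.
KYBER_ASYNC_PERCENT is 75 as in mainline kyber-iosched.c; nr_requests=256
is only an illustrative value:

    #include <stdio.h>

    #define KYBER_ASYNC_PERCENT 75U

    int main(void)
    {
            /* async requests are capped at 75% of the scheduler tag depth */
            unsigned int nr_requests = 256;
            unsigned int async_depth = nr_requests * KYBER_ASYNC_PERCENT / 100U;

            /* prints: nr_requests=256 async_depth=192 */
            printf("nr_requests=%u async_depth=%u\n", nr_requests, async_depth);
            return 0;
    }
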
 block/kyber-iosched.c | 26 +++++---------------------
 1 file changed, 5 insertions(+), 21 deletions(-)

diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index ccfefa6a3669..0909851f22c6 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -157,9 +157,6 @@ struct kyber_queue_data {
 	 */
 	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];
 
-	/* Number of allowed async requests. */
-	unsigned int async_depth;
-
 	struct kyber_cpu_latency __percpu *cpu_latency;
 
 	/* Timer for stats aggregation and adjusting domain tokens. */
@@ -449,11 +446,11 @@ static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
 
 static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
 {
-	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
+	struct request_queue *q = hctx->queue;
 	struct blk_mq_tags *tags = hctx->sched_tags;
 
-	kqd->async_depth = hctx->queue->nr_requests * KYBER_ASYNC_PERCENT / 100U;
-	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth);
+	q->async_depth = hctx->queue->nr_requests * KYBER_ASYNC_PERCENT / 100U;
+	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, q->async_depth);
 }
 
 static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
@@ -552,11 +549,8 @@ static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	 * We use the scheduler tags as per-hardware queue queueing tokens.
 	 * Async requests can be limited at this stage.
 	 */
-	if (!op_is_sync(opf)) {
-		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
-
-		data->shallow_depth = kqd->async_depth;
-	}
+	if (!op_is_sync(opf))
+		data->shallow_depth = data->q->async_depth;
 }
 
 static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
@@ -952,15 +946,6 @@ KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
 KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
 #undef KYBER_DEBUGFS_DOMAIN_ATTRS
 
-static int kyber_async_depth_show(void *data, struct seq_file *m)
-{
-	struct request_queue *q = data;
-	struct kyber_queue_data *kqd = q->elevator->elevator_data;
-
-	seq_printf(m, "%u\n", kqd->async_depth);
-	return 0;
-}
-
 static int kyber_cur_domain_show(void *data, struct seq_file *m)
 {
 	struct blk_mq_hw_ctx *hctx = data;
@@ -986,7 +971,6 @@ static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
 	KYBER_QUEUE_DOMAIN_ATTRS(write),
 	KYBER_QUEUE_DOMAIN_ATTRS(discard),
 	KYBER_QUEUE_DOMAIN_ATTRS(other),
-	{"async_depth", 0400, kyber_async_depth_show},
 	{},
 };
 #undef KYBER_QUEUE_DOMAIN_ATTRS
-- 
2.39.2

