Message-Id: <20250530080355.1138759-2-yukuai1@huaweicloud.com>
Date: Fri, 30 May 2025 16:03:52 +0800
From: Yu Kuai <yukuai1@...weicloud.com>
To: axboe@...nel.dk
Cc: linux-block@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	yukuai3@...wei.com,
	yukuai1@...weicloud.com,
	yi.zhang@...wei.com,
	yangerkun@...wei.com,
	johnny.chenyi@...wei.com
Subject: [PATCH RFC 1/4] elevator: introduce global lock for sq_shared elevator

From: Yu Kuai <yukuai3@...wei.com>

Currently, both mq-deadline and bfq rely on an internal global lock.
Introduce a high-level lock in struct elevator_queue, taken around the
dispatch, insert and bio-merge hooks for single-queue shared elevators,
in preparation for converting both schedulers to use it and for
supporting batch request dispatching.

Signed-off-by: Yu Kuai <yukuai3@...wei.com>
---
 block/blk-mq-sched.c |  4 +--
 block/blk-mq.c       |  5 ++--
 block/elevator.c     |  1 +
 block/elevator.h     | 61 ++++++++++++++++++++++++++++++++++++++++++--
 4 files changed, 64 insertions(+), 7 deletions(-)
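
Note (not part of this patch): below is a minimal, self-contained
userspace sketch of the conditional-locking pattern that the new
elevator_dispatch_request()/elevator_insert_requests()/
elevator_bio_merge() wrappers implement, namely taking the
elevator-wide lock around an op only when the scheduler is
single-queue shared. A pthread mutex stands in for the kernel
spinlock, and every name in it (fake_elevator, dispatch_one) is a
hypothetical illustration, not kernel API.

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct fake_elevator {
          pthread_mutex_t lock;   /* plays the role of eq->lock */
          bool sq_shared;         /* stands in for blk_queue_sq_sched() */
          int pending;            /* toy "queue depth" */
  };

  /* mirrors elevator_dispatch_request(): lock only if sq_shared */
  static int dispatch_one(struct fake_elevator *e)
  {
          int rq = -1;

          if (e->sq_shared)
                  pthread_mutex_lock(&e->lock);

          if (e->pending > 0)
                  rq = e->pending--;      /* "dispatch" one request */

          if (e->sq_shared)
                  pthread_mutex_unlock(&e->lock);

          return rq;
  }

  int main(void)
  {
          struct fake_elevator e = {
                  .lock = PTHREAD_MUTEX_INITIALIZER,
                  .sq_shared = true,
                  .pending = 3,
          };
          int rq;

          while ((rq = dispatch_one(&e)) >= 0)
                  printf("dispatched request %d\n", rq);
          return 0;
  }

The design point the sketch mirrors: the locking cost is only paid
when the elevator is actually shared across hardware queues; per-hctx
elevators skip the lock entirely.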

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 55a0fd105147..c1390d3e6381 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -113,7 +113,7 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
 		if (budget_token < 0)
 			break;
 
-		rq = e->type->ops.dispatch_request(hctx);
+		rq = elevator_dispatch_request(hctx);
 		if (!rq) {
 			blk_mq_put_dispatch_budget(q, budget_token);
 			/*
@@ -342,7 +342,7 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 	enum hctx_type type;
 
 	if (e && e->type->ops.bio_merge) {
-		ret = e->type->ops.bio_merge(q, bio, nr_segs);
+		ret = elevator_bio_merge(q, bio, nr_segs);
 		goto out_put;
 	}
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4806b867e37d..2650b7b28d1e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2637,7 +2637,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
 		WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
 
 		list_add(&rq->queuelist, &list);
-		q->elevator->type->ops.insert_requests(hctx, &list, flags);
+		elevator_insert_requests(hctx, &list, flags);
 	} else {
 		trace_block_rq_insert(rq);
 
@@ -2912,8 +2912,7 @@ static void blk_mq_dispatch_list(struct rq_list *rqs, bool from_sched)
 		spin_unlock(&this_hctx->lock);
 		blk_mq_run_hw_queue(this_hctx, from_sched);
 	} else if (this_hctx->queue->elevator) {
-		this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
-				&list, 0);
+		elevator_insert_requests(this_hctx, &list, 0);
 		blk_mq_run_hw_queue(this_hctx, from_sched);
 	} else {
 		blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
diff --git a/block/elevator.c b/block/elevator.c
index ab22542e6cf0..91df270d9d91 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -144,6 +144,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
 	eq->type = e;
 	kobject_init(&eq->kobj, &elv_ktype);
 	mutex_init(&eq->sysfs_lock);
+	spin_lock_init(&eq->lock);
 	hash_init(eq->hash);
 
 	return eq;
diff --git a/block/elevator.h b/block/elevator.h
index a07ce773a38f..8399dfe5c3b6 100644
--- a/block/elevator.h
+++ b/block/elevator.h
@@ -110,12 +110,12 @@ struct request *elv_rqhash_find(struct request_queue *q, sector_t offset);
 /*
  * each queue has an elevator_queue associated with it
  */
-struct elevator_queue
-{
+struct elevator_queue {
 	struct elevator_type *type;
 	void *elevator_data;
 	struct kobject kobj;
 	struct mutex sysfs_lock;
+	spinlock_t lock;
 	unsigned long flags;
 	DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
 };
@@ -186,4 +186,61 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t);
 void blk_mq_sched_reg_debugfs(struct request_queue *q);
 void blk_mq_sched_unreg_debugfs(struct request_queue *q);
 
+#define elevator_lock(e)	spin_lock_irq(&(e)->lock)
+#define elevator_unlock(e)	spin_unlock_irq(&(e)->lock)
+
+static inline struct request *elevator_dispatch_request(
+		struct blk_mq_hw_ctx *hctx)
+{
+	struct request_queue *q = hctx->queue;
+	struct elevator_queue *e = q->elevator;
+	bool sq_shared = blk_queue_sq_sched(q);
+	struct request *rq;
+
+	if (sq_shared)
+		elevator_lock(e);
+
+	rq = e->type->ops.dispatch_request(hctx);
+
+	if (sq_shared)
+		elevator_unlock(e);
+
+	return rq;
+}
+
+static inline void elevator_insert_requests(struct blk_mq_hw_ctx *hctx,
+					    struct list_head *list,
+					    blk_insert_t flags)
+{
+	struct request_queue *q = hctx->queue;
+	struct elevator_queue *e = q->elevator;
+	bool sq_shared = blk_queue_sq_sched(q);
+
+	if (sq_shared)
+		elevator_lock(e);
+
+	e->type->ops.insert_requests(hctx, list, flags);
+
+	if (sq_shared)
+		elevator_unlock(e);
+}
+
+static inline bool elevator_bio_merge(struct request_queue *q, struct bio *bio,
+				      unsigned int nr_segs)
+{
+	struct elevator_queue *e = q->elevator;
+	bool sq_shared = blk_queue_sq_sched(q);
+	bool ret;
+
+	if (sq_shared)
+		elevator_lock(e);
+
+	ret = e->type->ops.bio_merge(q, bio, nr_segs);
+
+	if (sq_shared)
+		elevator_unlock(e);
+
+	return ret;
+}
+
 #endif /* _ELEVATOR_H */
-- 
2.39.2

