Message-ID: <20250423205127.2976981-4-csander@purestorage.com>
Date: Wed, 23 Apr 2025 14:51:27 -0600
From: Caleb Sander Mateos <csander@...estorage.com>
To: Jens Axboe <axboe@...nel.dk>
Cc: linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org,
Caleb Sander Mateos <csander@...estorage.com>
Subject: [PATCH 3/3] block: avoid hctx spinlock for plug with multiple queues

blk_mq_flush_plug_list() has a fast path if all requests in the plug
are destined for the same request_queue. It calls ->queue_rqs() with the
whole batch of requests, falling back on ->queue_rq() for any requests
not handled by ->queue_rqs(). However, if the requests are destined for
multiple queues, blk_mq_flush_plug_list() has a slow path that calls
blk_mq_dispatch_list() repeatedly to filter the requests by ctx/hctx.
Each queue's requests are inserted into the hctx's dispatch list under a
spinlock, then __blk_mq_sched_dispatch_requests() takes them out of the
dispatch list (taking the spinlock again), and finally
blk_mq_dispatch_rq_list() calls ->queue_rq() on each request.
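
For reference, the two driver callbacks involved look roughly as follows
(paraphrased and abridged from include/linux/blk-mq.h; treat this as a
sketch of the interface, not the authoritative definition):

struct blk_mq_ops {
        /* Dispatch a single request to the device. */
        blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *hctx,
                                 const struct blk_mq_queue_data *bd);

        /*
         * Optional batch interface: dispatch a list of requests, all for
         * the same request_queue. Requests the driver does not consume
         * stay on the list and fall back to ->queue_rq().
         */
        void (*queue_rqs)(struct rq_list *rqlist);

        /* ... other callbacks elided ... */
};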

Acquiring the hctx spinlock twice and calling ->queue_rq() instead of
->queue_rqs() makes the slow path significantly more expensive. Thus,
batching more requests into a single plug (e.g. io_uring_enter syscall)
can counterintuitively hurt performance by causing the plug to span
multiple queues. We have observed 2-3% of CPU time spent acquiring the
hctx spinlock alone on workloads issuing requests to multiple NVMe
devices in the same io_uring SQE batches.
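
As a concrete (hypothetical) example of such a workload, the userspace
sketch below submits reads to two different NVMe devices in a single
io_uring_enter() batch; the device paths are made up and error handling
is omitted, so treat it as an illustration of the pattern, not a
benchmark:

#define _GNU_SOURCE
#include <fcntl.h>
#include <liburing.h>

#define BUF_SZ 4096

int main(void)
{
        struct io_uring ring;
        /* Hypothetical devices backed by distinct request_queues. */
        int fd0 = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
        int fd1 = open("/dev/nvme1n1", O_RDONLY | O_DIRECT);
        static char buf0[BUF_SZ] __attribute__((aligned(BUF_SZ)));
        static char buf1[BUF_SZ] __attribute__((aligned(BUF_SZ)));

        io_uring_queue_init(8, &ring, 0);

        io_uring_prep_read(io_uring_get_sqe(&ring), fd0, buf0, BUF_SZ, 0);
        io_uring_prep_read(io_uring_get_sqe(&ring), fd1, buf1, BUF_SZ, 0);

        /*
         * One io_uring_enter() submits both SQEs. The two requests share
         * a plug but target two request_queues, so the single-queue
         * ->queue_rqs() fast path in blk_mq_flush_plug_list() is skipped.
         */
        io_uring_submit_and_wait(&ring, 2);

        io_uring_queue_exit(&ring);
        return 0;
}

(Building the sketch requires liburing, linked with -luring.)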

Add a medium path in blk_mq_flush_plug_list() for plugs that don't have
elevators or come from a schedule, but do span multiple queues. Filter
the requests by queue and call ->queue_rqs()/->queue_rq() on the list of
requests destined for each request_queue.
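
From a driver's point of view the contract is unchanged: ->queue_rqs()
still only ever receives requests for that driver's own request_queue,
even when the plug spanned several devices, because the new
blk_mq_extract_queue_requests() filters by rq->q first. A purely
illustrative driver callback (example_hw_submit() is a made-up helper,
not a real API) might look like:

static void example_queue_rqs(struct rq_list *rqlist)
{
        struct rq_list requeue = {};
        struct request *rq;

        while ((rq = rq_list_pop(rqlist))) {
                /* Every request popped here targets this driver's queue. */
                if (!example_hw_submit(rq))
                        rq_list_add_tail(&requeue, rq);
        }

        /* Requests left on the list fall back to ->queue_rq(). */
        *rqlist = requeue;
}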

With this change, we no longer see any CPU time spent in _raw_spin_lock
from blk_mq_flush_plug_list and throughput increases accordingly.

Signed-off-by: Caleb Sander Mateos <csander@...estorage.com>
---
block/blk-mq.c | 45 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 44 insertions(+), 1 deletion(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a777cb361ee3..f820c6c0cb1a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2834,10 +2834,34 @@ static void __blk_mq_flush_list(struct request_queue *q, struct rq_list *rqs)
if (blk_queue_quiesced(q))
return;
q->mq_ops->queue_rqs(rqs);
}
+static void blk_mq_extract_queue_requests(struct rq_list *rqs,
+ struct rq_list *queue_rqs,
+ unsigned *queue_depth)
+{
+ struct rq_list matched_rqs = {}, unmatched_rqs = {};
+ struct request *rq = rq_list_pop(rqs);
+ struct request_queue *this_q = rq->q;
+ unsigned depth = 1;
+
+ rq_list_add_tail(&matched_rqs, rq);
+ while ((rq = rq_list_pop(rqs))) {
+ if (rq->q == this_q) {
+ rq_list_add_tail(&matched_rqs, rq);
+ depth++;
+ } else {
+ rq_list_add_tail(&unmatched_rqs, rq);
+ }
+ }
+
+ *queue_rqs = matched_rqs;
+ *rqs = unmatched_rqs;
+ *queue_depth = depth;
+}
+
static void blk_mq_dispatch_queue_requests(struct rq_list *rqs, unsigned depth)
{
struct request_queue *q = rq_list_peek(rqs)->q;
trace_block_unplug(q, depth, true);
@@ -2900,10 +2924,24 @@ static void blk_mq_dispatch_list(struct rq_list *rqs, bool from_sched)
blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
}
percpu_ref_put(&this_hctx->queue->q_usage_counter);
}
+static void blk_mq_dispatch_multiple_queue_requests(struct rq_list *rqs)
+{
+ do {
+ struct rq_list queue_rqs;
+ unsigned depth;
+
+ blk_mq_extract_queue_requests(rqs, &queue_rqs, &depth);
+ blk_mq_dispatch_queue_requests(&queue_rqs, depth);
+ while (!rq_list_empty(&queue_rqs)) {
+ blk_mq_dispatch_list(&queue_rqs, false);
+ }
+ } while (!rq_list_empty(rqs));
+}
+
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
unsigned int depth;
/*
@@ -2916,11 +2954,16 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
if (plug->rq_count == 0)
return;
depth = plug->rq_count;
plug->rq_count = 0;
- if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
+ if (!plug->has_elevator && !from_schedule) {
+ if (plug->multiple_queues) {
+ blk_mq_dispatch_multiple_queue_requests(&plug->mq_list);
+ return;
+ }
+
blk_mq_dispatch_queue_requests(&plug->mq_list, depth);
if (rq_list_empty(&plug->mq_list))
return;
}
--
2.45.2