Message-Id: <20260106-blk_unlikely-v1-1-90fb556a6776@debian.org>
Date: Tue, 06 Jan 2026 06:26:57 -0800
From: Breno Leitao <leitao@...ian.org>
To: Jens Axboe <axboe@...nel.dk>
Cc: linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
tj@...nel.org, josef@...icpanda.com, kernel-team@...a.com,
Breno Leitao <leitao@...ian.org>
Subject: [PATCH] blk-rq-qos: Remove unlikely() hints from QoS checks

The unlikely() annotations on the QUEUE_FLAG_QOS_ENABLED checks are
counterproductive: writeback throttling (WBT) is commonly enabled by
default, since CONFIG_BLK_WBT_MQ defaults to 'y', so the flag is set on
most request queues and the branch is usually taken.

Branch profiling on Meta servers, where WBT is enabled, confirms a 100%
misprediction rate on these checks.
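
(For reference, these numbers come from the kernel's annotated-branch
profiler: with CONFIG_PROFILE_ANNOTATED_BRANCHES=y, per-annotation
correct/incorrect counts are reported under
/sys/kernel/debug/tracing/trace_stat/branch_annotated.)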
Remove the unlikely() annotations to let the CPU's branch predictor
learn the actual behavior, potentially improving I/O path performance.
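
For context, unlikely() expands to a __builtin_expect() hint that
steers the compiler's block layout; a simplified sketch of the
definitions in include/linux/compiler.h (branch-profiling variant
omitted):

	/* Simplified from include/linux/compiler.h */
	#define likely(x)	__builtin_expect(!!(x), 1)	/* expect x true */
	#define unlikely(x)	__builtin_expect(!!(x), 0)	/* expect x false */

With unlikely(), the compiler tends to move the guarded body off the
straight-line path, which pessimizes the common case when the condition
is in fact almost always true.
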
Signed-off-by: Breno Leitao <leitao@...ian.org>
---
block/blk-rq-qos.h | 25 +++++++++----------------
1 file changed, 9 insertions(+), 16 deletions(-)
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index b538f2c0febc..a747a504fe42 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -112,29 +112,26 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos)
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_cleanup(q->rq_qos, bio);
}
static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos && !blk_rq_is_passthrough(rq))
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) &&
+ q->rq_qos && !blk_rq_is_passthrough(rq))
__rq_qos_done(q->rq_qos, rq);
}
static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos)
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_issue(q->rq_qos, rq);
}
static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos)
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_requeue(q->rq_qos, rq);
}
@@ -162,8 +159,7 @@ static inline void rq_qos_done_bio(struct bio *bio)
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos) {
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
bio_set_flag(bio, BIO_QOS_THROTTLED);
__rq_qos_throttle(q->rq_qos, bio);
}
@@ -172,16 +168,14 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
static inline void rq_qos_track(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos)
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_track(q->rq_qos, rq, bio);
}
static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos) {
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
bio_set_flag(bio, BIO_QOS_MERGED);
__rq_qos_merge(q->rq_qos, rq, bio);
}
@@ -189,8 +183,7 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
- if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
- q->rq_qos)
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_queue_depth_changed(q->rq_qos);
}
---
base-commit: c92f16e43c48f367af2f63956eb8385cb05816f3
change-id: 20260106-blk_unlikely-6ea4cf76b104
Best regards,
--
Breno Leitao <leitao@...ian.org>