Message-Id: <20230627120854.971475-4-chengming.zhou@linux.dev>
Date: Tue, 27 Jun 2023 20:08:53 +0800
From: chengming.zhou@...ux.dev
To: axboe@...nel.dk, tj@...nel.org, hch@....de, ming.lei@...hat.com
Cc: linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
zhouchengming@...edance.com
Subject: [PATCH 3/4] blk-flush: reuse rq queuelist in flush state machine
From: Chengming Zhou <zhouchengming@...edance.com>
Since we no longer need to maintain the inflight flush_data requests
list, we can reuse rq->queuelist for the flush pending list.
This decreases the size of struct request by 16 bytes.
Signed-off-by: Chengming Zhou <zhouchengming@...edance.com>
---
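Side note, not part of the patch: the 16 bytes come from dropping the
embedded struct list_head (two pointers on a 64-bit build). The
standalone userspace sketch below just illustrates the saving; the
struct names and the rq_end_io_fn stand-in are simplified placeholders,
not the kernel definitions.

#include <stdio.h>

/* Minimal stand-ins for the kernel types, only to show the layout. */
struct list_head { struct list_head *next, *prev; };
typedef void rq_end_io_fn;	/* placeholder for the kernel callback type */

/* flush state as embedded in struct request before this patch */
struct flush_before {
	unsigned int seq;
	struct list_head list;	/* removed by this patch */
	rq_end_io_fn *saved_end_io;
};

/* flush state after this patch: rq->queuelist is reused instead */
struct flush_after {
	unsigned int seq;
	rq_end_io_fn *saved_end_io;
};

int main(void)
{
	/* prints 32 vs 16 on a typical 64-bit build, i.e. 16 bytes saved */
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct flush_before), sizeof(struct flush_after));
	return 0;
}
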
block/blk-flush.c | 12 +++++-------
include/linux/blk-mq.h | 1 -
2 files changed, 5 insertions(+), 8 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index bb7adfc2a5da..81588edbe8b0 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -183,14 +183,13 @@ static void blk_flush_complete_seq(struct request *rq,
/* queue for flush */
if (list_empty(pending))
fq->flush_pending_since = jiffies;
- list_move_tail(&rq->flush.list, pending);
+ list_move_tail(&rq->queuelist, pending);
break;
case REQ_FSEQ_DATA:
- list_del_init(&rq->flush.list);
fq->flush_data_in_flight++;
spin_lock(&q->requeue_lock);
- list_add_tail(&rq->queuelist, &q->flush_list);
+ list_move_tail(&rq->queuelist, &q->flush_list);
spin_unlock(&q->requeue_lock);
blk_mq_kick_requeue_list(q);
break;
@@ -202,7 +201,7 @@ static void blk_flush_complete_seq(struct request *rq,
* flush data request completion path. Restore @rq for
* normal completion and end it.
*/
- list_del_init(&rq->flush.list);
+ list_del_init(&rq->queuelist);
blk_flush_restore_request(rq);
blk_mq_end_request(rq, error);
break;
@@ -258,7 +257,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
fq->flush_running_idx ^= 1;
/* and push the waiting requests to the next stage */
- list_for_each_entry_safe(rq, n, running, flush.list) {
+ list_for_each_entry_safe(rq, n, running, queuelist) {
unsigned int seq = blk_flush_cur_seq(rq);
BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
@@ -292,7 +291,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
{
struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
struct request *first_rq =
- list_first_entry(pending, struct request, flush.list);
+ list_first_entry(pending, struct request, queuelist);
struct request *flush_rq = fq->flush_rq;
/* C1 described at the top of this file */
@@ -386,7 +385,6 @@ static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
static void blk_rq_init_flush(struct request *rq)
{
rq->flush.seq = 0;
- INIT_LIST_HEAD(&rq->flush.list);
rq->rq_flags |= RQF_FLUSH_SEQ;
rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
rq->end_io = mq_flush_data_end_io;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 070551197c0e..96644d6f8d18 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -178,7 +178,6 @@ struct request {
struct {
unsigned int seq;
- struct list_head list;
rq_end_io_fn *saved_end_io;
} flush;
--
2.39.2