Message-ID: <20180529185541.GB23487@vader>
Date: Tue, 29 May 2018 11:55:41 -0700
From: Omar Sandoval <osandov@...ndov.com>
To: Jianchao Wang <jianchao.w.wang@...cle.com>
Cc: axboe@...nel.dk, holger@...lied-asynchrony.com,
linux-block@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH V2 1/2] blk-mq: abstract out blk-mq-sched rq list
iteration bio merge helper
On Wed, May 23, 2018 at 02:33:21PM +0800, Jianchao Wang wrote:
> From: Jens Axboe <axboe@...nel.dk>
>
> No functional changes in this patch, just a prep patch so this rq
> list bio merge helper can be reused by an IO scheduler.
Reviewed-by: Omar Sandoval <osandov@...com>
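
As a note for prospective users: with blk_mq_bio_list_merge() exported,
an IO scheduler can run this same merge loop over its own internal
request lists from its ->bio_merge hook. A minimal sketch of what that
might look like (the sched_data layout and the lock/list names here are
hypothetical, made up for illustration, not taken from this series):

	/* Hypothetical per-hctx scheduler data; names are illustrative. */
	struct example_hctx_data {
		spinlock_t lock;
		struct list_head rq_list;	/* requests staged by the scheduler */
	};

	static bool example_bio_merge(struct blk_mq_hw_ctx *hctx,
				      struct bio *bio)
	{
		struct example_hctx_data *ehd = hctx->sched_data;
		bool merged;

		/*
		 * blk_mq_bio_list_merge() neither takes nor asserts any
		 * lock, so the caller must keep the list stable while it
		 * is walked.
		 */
		spin_lock(&ehd->lock);
		merged = blk_mq_bio_list_merge(hctx->queue, &ehd->rq_list,
					       bio);
		spin_unlock(&ehd->lock);

		return merged;
	}

That also explains why the lockdep assertion stays behind in
blk_mq_attempt_merge() below: locking is now the caller's
responsibility, not the helper's.
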
> Signed-off-by: Jens Axboe <axboe@...nel.dk>
> ---
> block/blk-mq-sched.c   | 34 ++++++++++++++++++++++++----------
> include/linux/blk-mq.h |  3 ++-
> 2 files changed, 26 insertions(+), 11 deletions(-)
>
> diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
> index 25c14c5..b0f2c2a 100644
> --- a/block/blk-mq-sched.c
> +++ b/block/blk-mq-sched.c
> @@ -268,19 +268,16 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
> EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
>
> /*
> - * Reverse check our software queue for entries that we could potentially
> - * merge with. Currently includes a hand-wavy stop count of 8, to not spend
> - * too much time checking for merges.
> + * Iterate list of requests and see if we can merge this bio with any
> + * of them.
>  */
> -static bool blk_mq_attempt_merge(struct request_queue *q,
> -				 struct blk_mq_ctx *ctx, struct bio *bio)
> +bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
> +			   struct bio *bio)
> {
> 	struct request *rq;
> 	int checked = 8;
>
> -	lockdep_assert_held(&ctx->lock);
> -
> -	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
> +	list_for_each_entry_reverse(rq, list, queuelist) {
> 		bool merged = false;
>
> 		if (!checked--)
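
(For anyone reading along without the tree handy: the code elided
between this hunk and the next is the unchanged merge attempt itself.
From memory of the current blk-mq-sched.c it reads roughly as below,
so double-check against the tree; only the list that is iterated
changes in this patch, not this per-request logic:

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

The "continue;" and "}" context at the top of the next hunk are the
tail of that switch statement.)
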
> @@ -305,13 +302,30 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
> 			continue;
> 		}
>
> -		if (merged)
> -			ctx->rq_merged++;
> 		return merged;
> 	}
>
> 	return false;
> }
> +EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);
> +
> +/*
> + * Reverse check our software queue for entries that we could potentially
> + * merge with. Currently includes a hand-wavy stop count of 8, to not spend
> + * too much time checking for merges.
> + */
> +static bool blk_mq_attempt_merge(struct request_queue *q,
> +				 struct blk_mq_ctx *ctx, struct bio *bio)
> +{
> +	lockdep_assert_held(&ctx->lock);
> +
> +	if (blk_mq_bio_list_merge(q, &ctx->rq_list, bio)) {
> +		ctx->rq_merged++;
> +		return true;
> +	}
> +
> +	return false;
> +}
>
> bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
> {
> diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
> index ebc34a5..fb35517 100644
> --- a/include/linux/blk-mq.h
> +++ b/include/linux/blk-mq.h
> @@ -259,7 +259,8 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
> void blk_mq_kick_requeue_list(struct request_queue *q);
> void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
> void blk_mq_complete_request(struct request *rq);
> -
> +bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
> +		struct bio *bio);
> bool blk_mq_queue_stopped(struct request_queue *q);
> void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
> void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
> --
> 2.7.4
>