Message-ID: <1597850436-116171-9-git-send-email-john.garry@huawei.com>
Date: Wed, 19 Aug 2020 23:20:26 +0800
From: John Garry <john.garry@...wei.com>
To: <axboe@...nel.dk>, <jejb@...ux.ibm.com>,
<martin.petersen@...cle.com>, <don.brace@...rosemi.com>,
<kashyap.desai@...adcom.com>, <ming.lei@...hat.com>,
<bvanassche@....org>, <dgilbert@...erlog.com>,
<paolo.valente@...aro.org>, <hare@...e.de>, <hch@....de>
CC: <sumit.saxena@...adcom.com>, <linux-block@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <linux-scsi@...r.kernel.org>,
<esc.storagedev@...rosemi.com>, <megaraidlinux.pdl@...adcom.com>,
<chenxiang66@...ilicon.com>, <luojiaxing@...wei.com>,
John Garry <john.garry@...wei.com>
Subject: [PATCH v8 08/18] blk-mq: Record nr_active_requests per queue for when using shared sbitmap
The per-hctx nr_active value can no longer be used to fairly assign a share
of tag depth per request queue when using a shared sbitmap, as it does not
account for the tags being shared across all hctx's.
For this case, record nr_active_requests per request_queue, and base the
judgement on that value instead.
Tested-by: Don Brace <don.brace@...rosemi.com> #SCSI resv cmds patches used
Co-developed-by: Kashyap Desai <kashyap.desai@...adcom.com>
Signed-off-by: John Garry <john.garry@...wei.com>
---
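Note for reviewers: the snippet below is a minimal stand-alone model of the
fairness check as it behaves after this patch, for anyone who wants to see
the arithmetic in isolation. The names, the queue count and the tag depth
are illustrative assumptions only and are not taken from the kernel tree;
the real check lives in hctx_may_queue() and compares the queue-wide
nr_active_requests_shared_sbitmap (rather than the per-hctx nr_active)
against the per-queue share of the shared sbitmap depth.

/*
 * Stand-alone, user-space sketch of the post-patch fairness check.
 * Purely illustrative: types and numbers are made up for the example.
 */
#include <stdio.h>

#define ILLUSTRATIVE_SBITMAP_DEPTH	256	/* total shared tag depth */

static unsigned int share_per_queue(unsigned int depth, unsigned int users)
{
	unsigned int share;

	if (!users)
		return depth;
	/* Round up, but always allow at least some tags. */
	share = (depth + users - 1) / users;
	return share < 4 ? 4 : share;
}

int main(void)
{
	/* Pretend 8 request queues share one sbitmap of 256 tags. */
	unsigned int users = 8;
	unsigned int depth = share_per_queue(ILLUSTRATIVE_SBITMAP_DEPTH, users);

	/*
	 * With a shared sbitmap, the active count is the request_queue-wide
	 * counter, not the per-hctx one; 30 is just an example value.
	 */
	unsigned int nr_active_shared = 30;

	printf("share per queue = %u\n", depth);		/* 32 */
	printf("may queue = %d\n", nr_active_shared < depth);	/* 1 */
	return 0;
}

Running the model with these example numbers gives each queue a share of 32
tags, so a queue-wide active count of 30 still passes the check, mirroring
how hctx_may_queue() now throttles per request queue instead of per hctx.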
block/blk-core.c | 2 ++
block/blk-mq.c | 4 ++--
block/blk-mq.h | 26 ++++++++++++++++++++++++--
include/linux/blkdev.h | 2 ++
4 files changed, 30 insertions(+), 4 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index d9d632639bd1..360975255a2a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -542,6 +542,8 @@ struct request_queue *blk_alloc_queue(int node_id)
q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->node = node_id;
+ atomic_set(&q->nr_active_requests_shared_sbitmap, 0);
+
timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, 0);
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a500d1dfa1bd..ebb72a59b433 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -519,7 +519,7 @@ void blk_mq_free_request(struct request *rq)
ctx->rq_completed[rq_is_sync(rq)]++;
if (rq->rq_flags & RQF_MQ_INFLIGHT)
- atomic_dec(&hctx->nr_active);
+ __blk_mq_dec_active_requests(hctx);
if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
laptop_io_completion(q->backing_dev_info);
@@ -1127,7 +1127,7 @@ static bool blk_mq_get_driver_tag(struct request *rq)
if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
!(rq->rq_flags & RQF_MQ_INFLIGHT)) {
rq->rq_flags |= RQF_MQ_INFLIGHT;
- atomic_inc(&hctx->nr_active);
+ __blk_mq_inc_active_requests(hctx);
}
hctx->tags->rqs[rq->tag] = rq;
return true;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 56dc37c21908..25ec73078e95 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -199,6 +199,28 @@ static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
return true;
}
+static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
+{
+ if (blk_mq_is_sbitmap_shared(hctx->flags))
+ atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
+ else
+ atomic_inc(&hctx->nr_active);
+}
+
+static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
+{
+ if (blk_mq_is_sbitmap_shared(hctx->flags))
+ atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
+ else
+ atomic_dec(&hctx->nr_active);
+}
+
+static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
+{
+ if (blk_mq_is_sbitmap_shared(hctx->flags))
+ return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
+ return atomic_read(&hctx->nr_active);
+}
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
@@ -207,7 +229,7 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
if (rq->rq_flags & RQF_MQ_INFLIGHT) {
rq->rq_flags &= ~RQF_MQ_INFLIGHT;
- atomic_dec(&hctx->nr_active);
+ __blk_mq_dec_active_requests(hctx);
}
}
@@ -287,7 +309,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
* Allow at least some tags
*/
depth = max((bt->sb.depth + users - 1) / users, 4U);
- return atomic_read(&hctx->nr_active) < depth;
+ return __blk_mq_active_requests(hctx) < depth;
}
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bb5636cc17b9..1d85235611e1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -484,6 +484,8 @@ struct request_queue {
struct timer_list timeout;
struct work_struct timeout_work;
+ atomic_t nr_active_requests_shared_sbitmap;
+
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
--
2.26.2