From ae72bb9f67d01b3a02cee80c81a712f775d13c32 Mon Sep 17 00:00:00 2001
From: Bart Van Assche <bart.vanassche@sandisk.com>
Date: Tue, 20 Dec 2016 12:00:47 +0100
Subject: [PATCH 2/3] blk-mq: Make the blk_mq_{get,put}_tag() callers specify
 the tag set

Instead of letting blk_mq_get_tag() and blk_mq_put_tag() look up the
tag set through hctx->tags themselves, make their callers pass the tag
set explicitly. Additionally, split request map management in two:
blk_mq_alloc_rq_map() and blk_mq_free_rq_map() now only allocate and
free the tag map and the request pointer array, while the new helpers
blk_mq_alloc_rqs() and blk_mq_free_rqs() allocate and free the request
structures themselves.

This patch does not change any functionality.
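
A minimal sketch of the resulting calling convention (the variable
names are illustrative only; error handling is abbreviated):

	struct blk_mq_tags *tags;

	/* Allocation now happens in two steps ... */
	tags = blk_mq_alloc_rq_map(set, hctx_idx);
	if (tags && blk_mq_alloc_rqs(set, tags, hctx_idx) < 0) {
		blk_mq_free_rq_map(tags);
		tags = NULL;
	}

	/* ... and freeing mirrors it in reverse order. */
	if (tags) {
		blk_mq_free_rqs(set, tags, hctx_idx);
		blk_mq_free_rq_map(tags);
	}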
---
 block/blk-mq-tag.c | 31 +++++++++++-----------
 block/blk-mq-tag.h |  7 +++--
 block/blk-mq.c     | 89 +++++++++++++++++++++++++++++++++++++++---------------
 block/blk-mq.h     |  9 ++++--
 4 files changed, 88 insertions(+), 48 deletions(-)
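
Note: every caller in this patch still passes the tag set owned by the
hardware context. A minimal sketch of the convention, based on the
hunks below:

	tag = blk_mq_get_tag(data, data->hctx->tags);
	...
	blk_mq_put_tag(hctx, hctx->tags, ctx, tag);

Making the tag set an explicit argument only decouples these helpers
from hctx->tags, so that a caller could pass another tag set later on.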

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index dcf5ce3ba4bf..890d634db0ee 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -156,47 +156,48 @@ static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
 	return tag;
 }
 
-static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
+static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
+				     struct blk_mq_tags *tags)
 {
 	int tag;
 
-	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
-		     data->hctx->tags);
+	tag = bt_get(data, &tags->bitmap_tags, data->hctx, tags);
 	if (tag >= 0)
-		return tag + data->hctx->tags->nr_reserved_tags;
+		return tag + tags->nr_reserved_tags;
 
 	return BLK_MQ_TAG_FAIL;
 }
 
-static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
+static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data,
+					      struct blk_mq_tags *tags)
 {
 	int tag;
 
-	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
+	if (unlikely(!tags->nr_reserved_tags)) {
 		WARN_ON_ONCE(1);
 		return BLK_MQ_TAG_FAIL;
 	}
 
-	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL,
-		     data->hctx->tags);
+	tag = bt_get(data, &tags->breserved_tags, NULL, tags);
 	if (tag < 0)
 		return BLK_MQ_TAG_FAIL;
 
 	return tag;
 }
 
-unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
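+/* Allocate a tag from @tags; may sleep. Returns BLK_MQ_TAG_FAIL on error. */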
+unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data,
+			    struct blk_mq_tags *tags)
 {
 	if (data->flags & BLK_MQ_REQ_RESERVED)
-		return __blk_mq_get_reserved_tag(data);
-	return __blk_mq_get_tag(data);
+		return __blk_mq_get_reserved_tag(data, tags);
+	return __blk_mq_get_tag(data, tags);
 }
 
-void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-		    unsigned int tag)
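+/* Release @tag back into @tags. @tag must have been obtained from @tags. */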
+void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
+		    struct blk_mq_ctx *ctx, unsigned int tag)
 {
-	struct blk_mq_tags *tags = hctx->tags;
-
 	if (tag >= tags->nr_reserved_tags) {
 		const int real_tag = tag - tags->nr_reserved_tags;
 
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index d1662734dc53..84186a11d2e0 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -23,9 +23,10 @@ struct blk_mq_tags {
 extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy);
 extern void blk_mq_free_tags(struct blk_mq_tags *tags);
 
-extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
-extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			   unsigned int tag);
+extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data,
+				   struct blk_mq_tags *tags);
+extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
+			   struct blk_mq_ctx *ctx, unsigned int tag);
 extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 35e1162602f5..b68b7fc43e46 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -220,12 +220,13 @@ EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);
 struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
 				       unsigned int op)
 {
+	struct blk_mq_tags *tags = data->hctx->tags;
 	struct request *rq;
 	unsigned int tag;
 
-	tag = blk_mq_get_tag(data);
+	tag = blk_mq_get_tag(data, tags);
 	if (tag != BLK_MQ_TAG_FAIL) {
-		rq = data->hctx->tags->rqs[tag];
+		rq = tags->rqs[tag];
 
 		if (blk_mq_tag_busy(data->hctx)) {
 			rq->rq_flags = RQF_MQ_INFLIGHT;
@@ -339,7 +340,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
-	blk_mq_put_tag(hctx, ctx, tag);
+	blk_mq_put_tag(hctx, hctx->tags, ctx, tag);
 	blk_queue_exit(q);
 }
 
@@ -1554,8 +1555,9 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	return cookie;
 }
 
-void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
-			unsigned int hctx_idx)
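+/* Free the requests attached to @tags, but not the tag map itself. */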
+void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+		     unsigned int hctx_idx)
 {
 	struct page *page;
 
@@ -1581,23 +1583,21 @@ void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		kmemleak_free(page_address(page));
 		__free_pages(page, page->private);
 	}
+}
 
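+/* Free the tag map. Any requests it owned must already have been freed. */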
+void blk_mq_free_rq_map(struct blk_mq_tags *tags)
+{
 	kfree(tags->rqs);
 
 	blk_mq_free_tags(tags);
 }
 
-static size_t order_to_size(unsigned int order)
-{
-	return (size_t)PAGE_SIZE << order;
-}
-
-struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
-				       unsigned int hctx_idx)
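+/* Allocate a tag map and its request pointer array, but no requests yet. */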
+struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
+					unsigned int hctx_idx)
 {
 	struct blk_mq_tags *tags;
-	unsigned int i, j, entries_per_page, max_order = 4;
-	size_t rq_size, left;
 
 	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
 				set->numa_node,
@@ -1605,8 +1602,6 @@ struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 	if (!tags)
 		return NULL;
 
-	INIT_LIST_HEAD(&tags->page_list);
-
 	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
 				 set->numa_node);
@@ -1615,6 +1613,26 @@ struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		return NULL;
 	}
 
+	return tags;
+}
+
+static size_t order_to_size(unsigned int order)
+{
+	return (size_t)PAGE_SIZE << order;
+}
+
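+/*
+ * Allocate the requests for @tags. Returns 0 on success and -ENOMEM on
+ * failure, in which case any partially allocated requests have been freed.
+ */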
+int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+		     unsigned int hctx_idx)
+{
+	unsigned int i, j, entries_per_page, max_order = 4;
+	size_t rq_size, left;
+
+	INIT_LIST_HEAD(&tags->page_list);
+
 	/*
 	 * rq_size is the size of the request plus driver payload, rounded
 	 * to the cacheline size
@@ -1674,11 +1692,11 @@ struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 			i++;
 		}
 	}
-	return tags;
+	return 0;
 
 fail:
-	blk_mq_free_rq_map(set, tags, hctx_idx);
-	return NULL;
+	blk_mq_free_rqs(set, tags, hctx_idx);
+	return -ENOMEM;
 }
 
 /*
@@ -1899,7 +1917,14 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 		hctx_idx = q->mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
 		if (!set->tags[hctx_idx]) {
-			set->tags[hctx_idx] = blk_mq_init_rq_map(set, hctx_idx);
+			set->tags[hctx_idx] = blk_mq_alloc_rq_map(set,
+								  hctx_idx);
+			if (set->tags[hctx_idx] &&
+			    blk_mq_alloc_rqs(set, set->tags[hctx_idx],
+					     hctx_idx) < 0) {
+				blk_mq_free_rq_map(set->tags[hctx_idx]);
+				set->tags[hctx_idx] = NULL;
+			}
 
 			/*
 			 * If tags initialization fail for some hctx,
@@ -1932,7 +1957,8 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 			 * allocation
 			 */
 			if (i && set->tags[i]) {
-				blk_mq_free_rq_map(set, set->tags[i], i);
+				blk_mq_free_rqs(set, set->tags[i], i);
+				blk_mq_free_rq_map(set->tags[i]);
 				set->tags[i] = NULL;
 			}
 			hctx->tags = NULL;
@@ -2102,7 +2128,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
 		if (hctx) {
 			if (hctx->tags) {
-				blk_mq_free_rq_map(set, hctx->tags, j);
+				blk_mq_free_rqs(set, set->tags[j], j);
+				blk_mq_free_rq_map(set->tags[j]);
 				set->tags[j] = NULL;
 			}
 			blk_mq_exit_hctx(q, set, hctx, j);
@@ -2304,16 +2331,22 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	int i;
 
 	for (i = 0; i < set->nr_hw_queues; i++) {
-		set->tags[i] = blk_mq_init_rq_map(set, i);
+		set->tags[i] = blk_mq_alloc_rq_map(set, i);
 		if (!set->tags[i])
 			goto out_unwind;
+		if (blk_mq_alloc_rqs(set, set->tags[i], i) < 0) {
+			blk_mq_free_rq_map(set->tags[i]);
+			goto out_unwind;
+		}
 	}
 
 	return 0;
 
 out_unwind:
-	while (--i >= 0)
-		blk_mq_free_rq_map(set, set->tags[i], i);
+	while (--i >= 0) {
+		blk_mq_free_rqs(set, set->tags[i], i);
+		blk_mq_free_rq_map(set->tags[i]);
+	}
 
 	return -ENOMEM;
 }
@@ -2438,8 +2471,10 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 	int i;
 
 	for (i = 0; i < nr_cpu_ids; i++) {
-		if (set->tags[i])
-			blk_mq_free_rq_map(set, set->tags[i], i);
+		if (set->tags[i]) {
+			blk_mq_free_rqs(set, set->tags[i], i);
+			blk_mq_free_rq_map(set->tags[i]);
+		}
 	}
 
 	kfree(set->mq_map);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 898c3c9a60ec..2e98dd8ccee2 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -37,10 +37,13 @@ void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 /*
  * Internal helpers for allocating/freeing the request map
  */
-void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
-			unsigned int hctx_idx);
-struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
+void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+		     unsigned int hctx_idx);
+void blk_mq_free_rq_map(struct blk_mq_tags *tags);
+struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 					unsigned int hctx_idx);
+int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+		     unsigned int hctx_idx);
 
 /*
  * Internal helpers for request insertion into sw queues
-- 
2.11.0