Date:   Sun, 18 Sep 2016 09:37:22 +0200
From:   Alexander Gordeev <agordeev@...hat.com>
To:     linux-kernel@...r.kernel.org
Cc:     Alexander Gordeev <agordeev@...hat.com>,
        linux-block@...r.kernel.org
Subject: [PATCH 12/14] blk-mq: Rework blk_mq_init_hctx() function

Rework the blk_mq_init_hctx() function so that all memory allocations
are done before any data initialization and callback invocation. As a
result, the latter are avoided entirely when an allocation fails under
tight memory conditions.
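
For illustration only, a minimal sketch of the allocate-first pattern
applied by this patch (hypothetical foo_create()/init_cb names, not
blk-mq code): every allocation is attempted up front, before any state
is initialized or any callback runs, so a failed allocation unwinds
with plain frees and never has to undo a callback.

#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
	void *buf;
};

/* Hypothetical constructor following the allocate-first pattern. */
static struct foo *foo_create(size_t buf_size,
			      int (*init_cb)(struct foo *))
{
	struct foo *f;

	/* Step 1: do every allocation before touching anything else. */
	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->buf = kmalloc(buf_size, GFP_KERNEL);
	if (!f->buf)
		goto free_foo;

	/* Step 2: all memory is in hand; initialize and call back. */
	spin_lock_init(&f->lock);

	if (init_cb && init_cb(f))
		goto free_buf;

	return f;

 free_buf:
	kfree(f->buf);
 free_foo:
	kfree(f);
	return NULL;
}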

CC: linux-block@...r.kernel.org
Signed-off-by: Alexander Gordeev <agordeev@...hat.com>
---
 block/blk-mq.c | 50 ++++++++++++++++++++++++--------------------------
 1 file changed, 24 insertions(+), 26 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index af6d049..5ecbb5f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1714,6 +1714,22 @@ static struct blk_mq_hw_ctx *blk_mq_init_hctx(struct request_queue *q,
 	if (!zalloc_cpumask_var_node(&hctx->cpumask, GFP_KERNEL, node))
 		goto free_hctx;
 
+	/*
+	 * Allocate space for all possible cpus to avoid allocation at
+	 * runtime
+	 */
+	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
+					GFP_KERNEL, node);
+	if (!hctx->ctxs)
+		goto free_cpumask;
+
+	if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
+		goto free_ctxs;
+
+	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
+	if (!hctx->fq)
+		goto free_bitmap;
+
 	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
 	spin_lock_init(&hctx->lock);
@@ -1722,55 +1738,37 @@ static struct blk_mq_hw_ctx *blk_mq_init_hctx(struct request_queue *q,
 	hctx->numa_node = node;
 	hctx->queue = q;
 	hctx->queue_num = hctx_idx;
+	hctx->nr_ctx = 0;
 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
+	hctx->tags = set->tags[hctx_idx];
 
 	blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
 					blk_mq_hctx_notify, hctx);
 	blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
 
-	hctx->tags = set->tags[hctx_idx];
-
-	/*
-	 * Allocate space for all possible cpus to avoid allocation at
-	 * runtime
-	 */
-	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
-					GFP_KERNEL, node);
-	if (!hctx->ctxs)
-		goto unregister_cpu_notifier;
-
-	if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
-		goto free_ctxs;
-
-	hctx->nr_ctx = 0;
-
 	if (set->ops->init_hctx &&
 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
-		goto free_bitmap;
-
-	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
-	if (!hctx->fq)
-		goto exit_hctx;
+		goto unregister_cpu_notifier;
 
 	if (set->ops->init_request &&
 	    set->ops->init_request(set->driver_data,
 				   hctx->fq->flush_rq, hctx_idx,
 				   flush_start_tag + hctx_idx, node))
-		goto free_fq;
+		goto exit_hctx;
 
 	return hctx;
 
- free_fq:
-	kfree(hctx->fq);
  exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
+ unregister_cpu_notifier:
+	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+	kfree(hctx->fq);
  free_bitmap:
 	blk_mq_free_bitmap(&hctx->ctx_map);
  free_ctxs:
 	kfree(hctx->ctxs);
- unregister_cpu_notifier:
-	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+ free_cpumask:
 	free_cpumask_var(hctx->cpumask);
  free_hctx:
 	kfree(hctx);
-- 
1.8.3.1
