Message-Id: <d31efb3af96031be9a94d9c803a0e7118da01fa5.1390897693.git.agordeev@redhat.com>
Date:	Tue, 28 Jan 2014 09:38:59 +0100
From:	Alexander Gordeev <agordeev@...hat.com>
To:	linux-kernel@...r.kernel.org
Cc:	Alexander Gordeev <agordeev@...hat.com>,
	Keith Busch <keith.busch@...el.com>,
	Matthew Wilcox <willy@...ux.intel.com>,
	linux-nvme@...ts.infradead.org
Subject: [PATCH 13/14] NVMe: Factor out nvme_setup_interrupts()

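Factor the interrupt (re)configuration out of nvme_setup_io_queues() into a
new nvme_setup_interrupts() helper and call it from nvme_dev_start() ahead of
the I/O queue setup. A negative return is treated as fatal there: since the
admin queue interrupt has already been torn down, the error path jumps past
nvme_disable_queue() to the new 'delete' label.
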
Signed-off-by: Alexander Gordeev <agordeev@...hat.com>
---
 drivers/block/nvme-core.c |   33 ++++++++++++++++++++++++++-------
 1 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 39868be..83d57b3 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1945,12 +1945,18 @@ static int nvme_init_interrupts(struct nvme_dev *dev, int nr_io_queues)
 	return vecs;
 }
 
-static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
+static int nvme_setup_interrupts(struct nvme_dev *dev, int nr_io_queues)
 {
-	int result, cpu, i, q_depth;
+	struct nvme_queue *adminq = dev->queues[0];
+	int result;
 
-	/* Deregister the admin queue's interrupt */
-	free_irq(dev->entry[dev->queues[0]->cq_vector].vector, dev->queues[0]);
+	/*
+	 * Deregister the admin queue's interrupt, since it is about
+	 * to move to another IRQ number. We do not re-configure the
+	 * admin queue - are there any adverse effects of this trick?
+	 * Should we call nvme_clear_queue() to mimic nvme_disable_queue()?
+	 */
+	free_irq(dev->entry[adminq->cq_vector].vector, adminq);
 
 	/*
 	 * Should investigate if there's a performance win from allocating
@@ -1963,12 +1969,19 @@ static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
 	 */
 	nr_io_queues = nvme_init_interrupts(dev, nr_io_queues);
 
-	result = queue_request_irq(dev->queues[0], "nvme admin");
+	result = queue_request_irq(adminq, "nvme admin");
 	if (result) {
-		dev->queues[0]->q_suspended = 1;
-		goto free_queues;
+		adminq->q_suspended = 1;
+		return result;
 	}
 
+	return nr_io_queues;
+}
+
+static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
+{
+	int result, cpu, i, q_depth;
+
 	/* Free previously allocated queues that are no longer usable */
 	spin_lock(&dev_list_lock);
 	for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
@@ -2442,6 +2455,11 @@ static int nvme_dev_start(struct nvme_dev *dev)
 	if (result < 0)
 		goto disable;
 
+	result = nvme_setup_interrupts(dev, result);
+	if (result < 0)
+		/* Admin queue interrupt has been torn down - cannot go on */
+		goto delete;
+
 	result = nvme_setup_io_queues(dev, result);
 	if (result)
 		goto disable;
@@ -2453,6 +2471,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
 		return -EBUSY;
 
 	nvme_disable_queue(dev->queues[0]);
+ delete:
 	spin_lock(&dev_list_lock);
 	list_del_init(&dev->node);
 	spin_unlock(&dev_list_lock);
-- 
1.7.7.6
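
For readers piecing the helper together from the two hunks above, this is
roughly how nvme_setup_interrupts() would read once the patch is applied. It
is a reconstruction for illustration only: every identifier comes from the
diff itself, but the context lines the diff does not show (notably the elided
comment above nvme_init_interrupts()) are assumed rather than copied from the
tree.

static int nvme_setup_interrupts(struct nvme_dev *dev, int nr_io_queues)
{
	struct nvme_queue *adminq = dev->queues[0];
	int result;

	/*
	 * Deregister the admin queue's interrupt, since it is about
	 * to move to another IRQ number. We do not re-configure the
	 * admin queue - are there any adverse effects of this trick?
	 * Should we call nvme_clear_queue() to mimic nvme_disable_queue()?
	 */
	free_irq(dev->entry[adminq->cq_vector].vector, adminq);

	/* ... unchanged comment about queue allocation elided ... */
	nr_io_queues = nvme_init_interrupts(dev, nr_io_queues);

	/* Re-register the admin queue on its (possibly relocated) vector */
	result = queue_request_irq(adminq, "nvme admin");
	if (result) {
		adminq->q_suspended = 1;
		return result;
	}

	return nr_io_queues;
}

nvme_dev_start() then passes the returned vector count straight into
nvme_setup_io_queues(), and on a negative return jumps to the new 'delete'
label, skipping nvme_disable_queue() because the admin IRQ is already gone.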

