lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <20130904185935.GF24898@oc6784271780.ibm.com>
Date:	Wed, 4 Sep 2013 13:59:35 -0500
From:	"Philip J. Kelleher" <pjk1939@...ux.vnet.ibm.com>
To:	axboe@...nel.dk
Cc:	linux-kernel@...r.kernel.org, brking@...ux.vnet.ibm.com
Subject: [PATCH v3 2/2] rsxx: Moving pci_map_page to prevent overflow.

From: Philip J Kelleher <pjk1939@...ux.vnet.ibm.com>

The pci_map_page function has been moved into our
issued workqueue to prevent us from running out of
mappable addresses on non-HWWD PCIe x8 slots. The
maximum amount that can possibly be mapped at one
time now is: 255 dmas X 4 dma channels X 4096 Bytes.

Signed-off-by: Philip J Kelleher <pjk1939@...ux.vnet.ibm.com>
-------------------------------------------------------------------------------


diff -uprN -X linux-block-vanilla/Documentation/dontdiff linux-block-vanilla/drivers/block/rsxx/core.c linux-block/drivers/block/rsxx/core.c
--- linux-block-vanilla/drivers/block/rsxx/core.c	2013-09-04 11:15:04.553215667 -0500
+++ linux-block/drivers/block/rsxx/core.c	2013-09-04 11:18:46.710034661 -0500
@@ -749,10 +749,6 @@ static pci_ers_result_t rsxx_slot_reset(
 
 	card->eeh_state = 0;
 
-	st = rsxx_eeh_remap_dmas(card);
-	if (st)
-		goto failed_remap_dmas;
-
 	spin_lock_irqsave(&card->irq_lock, flags);
 	if (card->n_targets & RSXX_MAX_TARGETS)
 		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
@@ -779,7 +775,6 @@ static pci_ers_result_t rsxx_slot_reset(
 	return PCI_ERS_RESULT_RECOVERED;
 
 failed_hw_buffers_init:
-failed_remap_dmas:
 	for (i = 0; i < card->n_targets; i++) {
 		if (card->ctrl[i].status.buf)
 			pci_free_consistent(card->dev,
diff -uprN -X linux-block-vanilla/Documentation/dontdiff linux-block-vanilla/drivers/block/rsxx/dma.c linux-block/drivers/block/rsxx/dma.c
--- linux-block-vanilla/drivers/block/rsxx/dma.c	2013-09-04 11:15:04.562991087 -0500
+++ linux-block/drivers/block/rsxx/dma.c	2013-09-04 11:20:21.119990932 -0500
@@ -397,6 +397,7 @@ static void rsxx_issue_dmas(struct rsxx_
 	int tag;
 	int cmds_pending = 0;
 	struct hw_cmd *hw_cmd_buf;
+	int dir;
 
 	hw_cmd_buf = ctrl->cmd.buf;
 
@@ -433,6 +434,28 @@ static void rsxx_issue_dmas(struct rsxx_
 			continue;
 		}
 
+		if (dma->cmd == HW_CMD_BLK_WRITE)
+			dir = PCI_DMA_TODEVICE;
+		else
+			dir = PCI_DMA_FROMDEVICE;
+
+		/*
+		 * The function pci_map_page is placed here because we can
+		 * only, by design, issue up to 255 commands to the hardware
+		 * at one time per DMA channel. So the maximum amount of mapped
+		 * memory would be 255 * 4 channels * 4096 Bytes which is less
+		 * than 2GB, the limit of a x8 Non-HWWD PCIe slot. This way the
+		 * pci_map_page function should never fail because of a
+		 * lack of mappable memory.
+		 */
+		dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
+				     dma->pg_off, dma->sub_page.cnt << 9, dir);
+		if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+			push_tracker(ctrl->trackers, tag);
+			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+			continue;
+		}
+
 		set_tracker_dma(ctrl->trackers, tag, dma);
 		hw_cmd_buf[ctrl->cmd.idx].command  = dma->cmd;
 		hw_cmd_buf[ctrl->cmd.idx].tag      = tag;
@@ -629,14 +652,6 @@ static int rsxx_queue_dma(struct rsxx_ca
 	if (!dma)
 		return -ENOMEM;
 
-	dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
-				     dir ? PCI_DMA_TODEVICE :
-				     PCI_DMA_FROMDEVICE);
-	if (pci_dma_mapping_error(card->dev, dma->dma_addr)) {
-		kmem_cache_free(rsxx_dma_pool, dma);
-		return -ENOMEM;
-	}
-
 	dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
 	dma->laddr        = laddr;
 	dma->sub_page.off = (dma_off >> 9);
@@ -1039,6 +1054,11 @@ int rsxx_eeh_save_issued_dmas(struct rsx
 			else
 				card->ctrl[i].stats.reads_issued--;
 
+			pci_unmap_page(card->dev, dma->dma_addr,
+				       get_dma_size(dma),
+				       dma->cmd == HW_CMD_BLK_WRITE ?
+				       PCI_DMA_TODEVICE :
+				       PCI_DMA_FROMDEVICE);
 			list_add_tail(&dma->list, &issued_dmas[i]);
 			push_tracker(card->ctrl[i].trackers, j);
 			cnt++;
@@ -1050,15 +1070,6 @@ int rsxx_eeh_save_issued_dmas(struct rsx
 		atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
 		card->ctrl[i].stats.sw_q_depth += cnt;
 		card->ctrl[i].e_cnt = 0;
-
-		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
-			if (!pci_dma_mapping_error(card->dev, dma->dma_addr))
-				pci_unmap_page(card->dev, dma->dma_addr,
-					       get_dma_size(dma),
-					       dma->cmd == HW_CMD_BLK_WRITE ?
-					       PCI_DMA_TODEVICE :
-					       PCI_DMA_FROMDEVICE);
-		}
 		spin_unlock_bh(&card->ctrl[i].queue_lock);
 	}
 
@@ -1067,31 +1078,6 @@ int rsxx_eeh_save_issued_dmas(struct rsx
 	return 0;
 }
 
-int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
-{
-	struct rsxx_dma *dma;
-	int i;
-
-	for (i = 0; i < card->n_targets; i++) {
-		spin_lock_bh(&card->ctrl[i].queue_lock);
-		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
-			dma->dma_addr = pci_map_page(card->dev, dma->page,
-					dma->pg_off, get_dma_size(dma),
-					dma->cmd == HW_CMD_BLK_WRITE ?
-					PCI_DMA_TODEVICE :
-					PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(card->dev, dma->dma_addr)) {
-				spin_unlock_bh(&card->ctrl[i].queue_lock);
-				kmem_cache_free(rsxx_dma_pool, dma);
-				return -ENOMEM;
-			}
-		}
-		spin_unlock_bh(&card->ctrl[i].queue_lock);
-	}
-
-	return 0;
-}
-
 int rsxx_dma_init(void)
 {
 	rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ