Message-Id: <20250627-macb-v2-11-ff8207d0bb77@bootlin.com>
Date: Fri, 27 Jun 2025 11:08:57 +0200
From: Théo Lebrun <theo.lebrun@...tlin.com>
To: Andrew Lunn <andrew+netdev@...n.ch>, 
 "David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>, 
 Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>, 
 Rob Herring <robh@...nel.org>, Krzysztof Kozlowski <krzk+dt@...nel.org>, 
 Conor Dooley <conor+dt@...nel.org>, 
 Nicolas Ferre <nicolas.ferre@...rochip.com>, 
 Claudiu Beznea <claudiu.beznea@...on.dev>, 
 Paul Walmsley <paul.walmsley@...ive.com>, 
 Palmer Dabbelt <palmer@...belt.com>, Albert Ou <aou@...s.berkeley.edu>, 
 Alexandre Ghiti <alex@...ti.fr>, Samuel Holland <samuel.holland@...ive.com>, 
 Richard Cochran <richardcochran@...il.com>, 
 Russell King <linux@...linux.org.uk>, 
 Thomas Bogendoerfer <tsbogend@...ha.franken.de>, 
 Vladimir Kondratiev <vladimir.kondratiev@...ileye.com>, 
 Gregory CLEMENT <gregory.clement@...tlin.com>, 
 Cyrille Pitchen <cyrille.pitchen@...el.com>, 
 Harini Katakam <harini.katakam@...inx.com>, 
 Rafal Ozieblo <rafalo@...ence.com>, 
 Haavard Skinnemoen <hskinnemoen@...el.com>, Jeff Garzik <jeff@...zik.org>
Cc: netdev@...r.kernel.org, devicetree@...r.kernel.org, 
 linux-kernel@...r.kernel.org, linux-riscv@...ts.infradead.org, 
 linux-mips@...r.kernel.org, Thomas Petazzoni <thomas.petazzoni@...tlin.com>, 
 Tawfik Bayouk <tawfik.bayouk@...ileye.com>, 
 Théo Lebrun <theo.lebrun@...tlin.com>
Subject: [PATCH net-next v2 11/18] net: macb: single dma_alloc_coherent()
 for DMA descriptors

Move from two dma_alloc_coherent() calls (Tx/Rx) for DMA descriptor
rings *per queue* to two dma_alloc_coherent() calls overall.

The issue is that all queues share the same register for configuring
the upper 32 bits of their Tx/Rx descriptor ring addresses. For
example, with Tx, notice how TBQPH does *not* depend on the queue index:

	#define GEM_TBQP(hw_q)		(0x0440 + ((hw_q) << 2))
	#define GEM_TBQPH(hw_q)		(0x04C8)

	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
	#endif

To maximize our chances of getting valid DMA addresses, we do a single
dma_alloc_coherent() across queues. This improves the odds because
alloc_pages() guarantees natural alignment. It still cannot *ensure*
valid DMA addresses, because of IOMMUs and code paths that do not go
through alloc_pages().
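
Why a single allocation helps, as a minimal sketch (the helper below is
hypothetical and not part of this patch): alloc_pages() returns blocks
aligned to their own power-of-two size, so a block no larger than 4 GiB
cannot straddle a 4 GiB boundary, meaning its first and last bytes share
the same upper 32 bits:

	/*
	 * Hypothetical helper: true iff [base, base + size) sits inside
	 * a single 4 GiB window, so one TBQPH/RBQPH value is correct for
	 * every queue's ring.
	 */
	static bool macb_rings_share_upper(dma_addr_t base, size_t size)
	{
		return upper_32_bits(base) == upper_32_bits(base + size - 1);
	}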

We error out if the rings do not all share the same upper 32 bits. That
is better than the current (theoretical, not reproduced) silent
corruption caused by the hardware accessing invalid addresses.
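
Concretely, the Tx path in this patch performs the check right after
allocating (the Rx path is identical):

	tx = dma_alloc_coherent(dev, size, &tx_dma, GFP_KERNEL);
	if (!tx || upper_32_bits(tx_dma) != upper_32_bits(tx_dma + size - 1))
		goto out_err;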

Two considerations:
 - dma_alloc_coherent() gives us page alignment. Here we drop this
   constraint, meaning each queue's ring will no longer be page-aligned.
 - This can save some memory. Fewer allocations mean less overhead (a
   constant cost per allocation) and fewer bytes wasted on alignment
   constraints (see the rough arithmetic below).
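
As a rough, purely illustrative computation (assuming 8 queues, 1 KiB
rings, and dma_alloc_coherent() handing out whole 4 KiB pages, none of
which is guaranteed on every platform):

	per-queue: 8 queues * 2 rings * 4 KiB (one page per alloc) = 64 KiB
	combined:  2 allocs * (8 rings * 1 KiB, rounded to pages)  = 16 KiB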

Fixes: 02c958dd3446 ("net/macb: add TX multiqueue support for gem")
Signed-off-by: Théo Lebrun <theo.lebrun@...tlin.com>
---
 drivers/net/ethernet/cadence/macb_main.c | 83 ++++++++++++++++++--------------
 1 file changed, 46 insertions(+), 37 deletions(-)

diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d3b3635998cad095246edf8a75faebbcf7115355..48b75d95861317b9925b366446c7572c7e186628 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2445,33 +2445,32 @@ static void macb_free_rx_buffers(struct macb *bp)
 
 static void macb_free_consistent(struct macb *bp)
 {
-	struct macb_queue *queue;
+	size_t size, tx_size_per_queue, rx_size_per_queue;
+	struct macb_queue *queue, *queue0 = bp->queues;
+	struct device *dev = &bp->pdev->dev;
 	unsigned int q;
-	int size;
 
 	if (bp->rx_ring_tieoff) {
-		dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+		dma_free_coherent(dev, macb_dma_desc_get_size(bp),
 				  bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
 		bp->rx_ring_tieoff = NULL;
 	}
 
 	bp->macbgem_ops.mog_free_rx_buffers(bp);
 
+	tx_size_per_queue = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
+	size = bp->num_queues * tx_size_per_queue;
+	dma_free_coherent(dev, size, queue0->tx_ring, queue0->tx_ring_dma);
+
+	rx_size_per_queue = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
+	size = bp->num_queues * rx_size_per_queue;
+	dma_free_coherent(dev, size, queue0->rx_ring, queue0->rx_ring_dma);
+
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 		kfree(queue->tx_skb);
 		queue->tx_skb = NULL;
-		if (queue->tx_ring) {
-			size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
-			dma_free_coherent(&bp->pdev->dev, size,
-					  queue->tx_ring, queue->tx_ring_dma);
-			queue->tx_ring = NULL;
-		}
-		if (queue->rx_ring) {
-			size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
-			dma_free_coherent(&bp->pdev->dev, size,
-					  queue->rx_ring, queue->rx_ring_dma);
-			queue->rx_ring = NULL;
-		}
+		queue->tx_ring = NULL; /* Single buffer owned by queue0 */
+		queue->rx_ring = NULL; /* Single buffer owned by queue0 */
 	}
 }
 
@@ -2513,37 +2512,47 @@ static int macb_alloc_rx_buffers(struct macb *bp)
 
 static int macb_alloc_consistent(struct macb *bp)
 {
+	size_t size, tx_size_per_queue, rx_size_per_queue;
+	dma_addr_t tx_dma, rx_dma;
+	struct device *dev = &bp->pdev->dev;
 	struct macb_queue *queue;
 	unsigned int q;
-	int size;
+	void *tx, *rx;
+
+	/*
+	 * The upper 32 bits of the Tx/Rx DMA descriptor addresses must match
+	 * across all queues! We cannot enforce this; the best we can do is a
+	 * single allocation per direction and hope it lands in alloc_pages(),
+	 * which guarantees natural alignment of physical addresses.
+	 */
+
+	tx_size_per_queue = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
+	size = bp->num_queues * tx_size_per_queue;
+	tx = dma_alloc_coherent(dev, size, &tx_dma, GFP_KERNEL);
+	if (!tx || upper_32_bits(tx_dma) != upper_32_bits(tx_dma + size - 1))
+		goto out_err;
+	netdev_dbg(bp->dev, "Allocated %zu bytes for %u TX rings at %08lx (mapped %p)\n",
+		   size, bp->num_queues, (unsigned long)tx_dma, tx);
+
+	rx_size_per_queue = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
+	size = bp->num_queues * rx_size_per_queue;
+	rx = dma_alloc_coherent(dev, size, &rx_dma, GFP_KERNEL);
+	if (!rx || upper_32_bits(rx_dma) != upper_32_bits(rx_dma + size - 1))
+		goto out_err;
+	netdev_dbg(bp->dev, "Allocated %zu bytes for %u RX rings at %08lx (mapped %p)\n",
+		   size, bp->num_queues, (unsigned long)rx_dma, rx);
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
-		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
-						    &queue->tx_ring_dma,
-						    GFP_KERNEL);
-		if (!queue->tx_ring ||
-		    upper_32_bits(queue->tx_ring_dma) != upper_32_bits(bp->queues->tx_ring_dma))
-			goto out_err;
-		netdev_dbg(bp->dev,
-			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
-			   q, size, (unsigned long)queue->tx_ring_dma,
-			   queue->tx_ring);
+		queue->tx_ring = tx + tx_size_per_queue * q;
+		queue->tx_ring_dma = tx_dma + tx_size_per_queue * q;
+
+		queue->rx_ring = rx + rx_size_per_queue * q;
+		queue->rx_ring_dma = rx_dma + rx_size_per_queue * q;
 
 		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
 		queue->tx_skb = kmalloc(size, GFP_KERNEL);
 		if (!queue->tx_skb)
 			goto out_err;
-
-		size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
-		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
-						 &queue->rx_ring_dma, GFP_KERNEL);
-		if (!queue->rx_ring ||
-		    upper_32_bits(queue->rx_ring_dma) != upper_32_bits(bp->queues->rx_ring_dma))
-			goto out_err;
-		netdev_dbg(bp->dev,
-			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
-			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
 	}
 	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
 		goto out_err;

-- 
2.50.0

