Message-ID: <1511766238-1710-2-git-send-email-harinik@xilinx.com>
Date:   Mon, 27 Nov 2017 12:33:58 +0530
From:   Harini Katakam <harini.katakam@...inx.com>
To:     <nicolas.ferre@...el.com>, <davem@...emloft.net>,
        <harinikatakamlinux@...il.com>
CC:     <netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
        <harinik@...inx.com>, <michals@...inx.com>,
        "Edgar E. Iglesias" <edgar.iglesias@...inx.com>,
        Michal Simek <michal.simek@...inx.com>
Subject: [RFC PATCH 2/2] net: macb: Tie off unused RX queues

From: "Edgar E. Iglesias" <edgar.iglesias@...inx.com>

Currently, we only use the first receive queue and leave the DMA
descriptor queue pointers of the remaining queues pointing at 0.

Disable the unused queues by pointing them at a looped descriptor
chain with no free slots, so those queues never present a buffer
the controller could write into.
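
For illustration, here is a minimal standalone sketch of the tie-off
idea. The flag bit positions mirror my reading of RX_USED/RX_WRAP in
macb.h (bits 0 and 1 of the descriptor address word); treat them as
assumptions here, the patch below is the authoritative implementation.

/* Standalone model of a tie-off RX descriptor: a one-entry ring that
 * is permanently marked "used" and wraps onto itself, so a queue
 * pointed at it never offers the controller a free buffer.
 */
#include <stdint.h>
#include <stdio.h>

#define RX_USED (1u << 0)	/* owned by software: no free slot for DMA */
#define RX_WRAP (1u << 1)	/* last entry: ring wraps back to the start */

struct rx_desc {
	uint32_t addr;		/* buffer address plus USED/WRAP flags */
	uint32_t ctrl;
};

int main(void)
{
	struct rx_desc tieoff = { .addr = RX_WRAP | RX_USED, .ctrl = 0 };

	/* A receive attempt looks for a descriptor with USED cleared. */
	int has_free_slot = !(tieoff.addr & RX_USED);

	printf("tie-off descriptor offers a free slot: %s\n",
	       has_free_slot ? "yes" : "no");
	return 0;
}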

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@...inx.com>
Signed-off-by: Harini Katakam <harinik@...inx.com>
Signed-off-by: Michal Simek <michal.simek@...inx.com>
---
 drivers/net/ethernet/cadence/macb.h      |  2 ++
 drivers/net/ethernet/cadence/macb_main.c | 42 ++++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+)

diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index acb6578..974c801 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -994,6 +994,7 @@ struct macb {
 	unsigned int		rx_tail;
 	unsigned int		rx_prepared_head;
 	struct macb_dma_desc	*rx_ring;
+	struct macb_dma_desc	*rx_ring_tieoff;
 	struct sk_buff		**rx_skbuff;
 	void			*rx_buffers;
 	size_t			rx_buffer_size;
@@ -1019,6 +1020,7 @@ struct macb {
 	}			hw_stats;
 
 	dma_addr_t		rx_ring_dma;
+	dma_addr_t		rx_ring_tieoff_dma;
 	dma_addr_t		rx_buffers_dma;
 
 	struct macb_or_gem_ops	macbgem_ops;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 623ae9c..b14a04d 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1755,6 +1755,12 @@ static void macb_free_consistent(struct macb *bp)
 		bp->rx_ring = NULL;
 	}
 
+	if (bp->rx_ring_tieoff) {
+		dma_free_coherent(&bp->pdev->dev, sizeof(bp->rx_ring_tieoff[0]),
+				  bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
+		bp->rx_ring_tieoff = NULL;
+	}
+
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 		kfree(queue->tx_skb);
 		queue->tx_skb = NULL;
@@ -1826,6 +1832,19 @@ static int macb_alloc_consistent(struct macb *bp)
 					 &bp->rx_ring_dma, GFP_KERNEL);
 	if (!bp->rx_ring)
 		goto out_err;
+
+	/* If we have more than one queue, allocate a tie off descriptor
+	 * that will be used to disable unused RX queues.
+	 */
+	if (bp->num_queues > 1) {
+		bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
+						sizeof(bp->rx_ring_tieoff[0]),
+						&bp->rx_ring_tieoff_dma,
+						GFP_KERNEL);
+		if (!bp->rx_ring_tieoff)
+			goto out_err;
+	}
+
 	netdev_dbg(bp->dev,
 		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
 		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
@@ -1840,6 +1859,19 @@ static int macb_alloc_consistent(struct macb *bp)
 	return -ENOMEM;
 }
 
+static void macb_init_tieoff(struct macb *bp)
+{
+	struct macb_dma_desc *d = bp->rx_ring_tieoff;
+
+	if (bp->num_queues > 1) {
+		/* Setup a wrapping descriptor with no free slots
+		 * (WRAP and USED) to tie off/disable unused RX queues.
+		 */
+		d->addr = MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED);
+		d->ctrl = 0;
+	}
+}
+
 static void gem_init_rings(struct macb *bp)
 {
 	struct macb_queue *queue;
@@ -1862,6 +1894,7 @@ static void gem_init_rings(struct macb *bp)
 	bp->rx_prepared_head = 0;
 
 	gem_rx_refill(bp);
+	macb_init_tieoff(bp);
 }
 
 static void macb_init_rings(struct macb *bp)
@@ -1879,6 +1912,7 @@ static void macb_init_rings(struct macb *bp)
 	bp->queues[0].tx_head = 0;
 	bp->queues[0].tx_tail = 0;
 	desc->ctrl |= MACB_BIT(TX_WRAP);
+	macb_init_tieoff(bp);
 }
 
 static void macb_reset_hw(struct macb *bp)
@@ -2063,6 +2097,14 @@ static void macb_init_hw(struct macb *bp)
 			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
 
+		/* We only use the first queue at the moment. Remaining
+		 * queues must be tied-off before we enable the receiver.
+		 *
+		 * See the documentation for receive_q1_ptr for more info.
+		 */
+		if (q)
+			queue_writel(queue, RBQP, bp->rx_ring_tieoff_dma);
+
 		/* Enable interrupts */
 		queue_writel(queue, IER,
 			     MACB_RX_INT_FLAGS |
-- 
2.7.4
