Date:   Thu, 3 Nov 2016 08:28:04 -0500
From:   Tom Lendacky <thomas.lendacky@....com>
To:     <netdev@...r.kernel.org>
CC:     Florian Fainelli <f.fainelli@...il.com>,
        David Miller <davem@...emloft.net>
Subject: [PATCH net-next v1 02/21] amd-xgbe: Prepare for priority-based FIFO
 allocation

Currently, the Rx and Tx fifos are divided evenly between the hardware
queues of the device.  As more queues are instantiated, the fifo memory
must instead be allocatable based on queue priority, so that higher
priority queues can be given more fifo memory than lower priority
queues.  Prepare for this by modifying the current fifo calculation to
record each queue's fifo allocation in an array that is then used to
program the hardware.

Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
---
 drivers/net/ethernet/amd/xgbe/xgbe-dev.c |   57 ++++++++++++++++++++++--------
 drivers/net/ethernet/amd/xgbe/xgbe.h     |    3 +-
 2 files changed, 43 insertions(+), 17 deletions(-)
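
Reviewer's note (illustrative, not part of the patch): the per-queue
register encoding that xgbe_calculate_equal_fifo() relies on can be
sanity-checked with a minimal standalone sketch.  Each increment of the
TQS/RQS fields represents 256 bytes of fifo, with 0 meaning 256 bytes;
the harness below (names and values are assumptions, not driver code)
shows the round trip:

	#include <stdio.h>

	/* Each register increment represents 256 bytes of fifo, with a
	 * value of 0 meaning 256 bytes, so q_fifo_size bytes encode as
	 * (q_fifo_size / 256) - 1.
	 */
	static unsigned int fifo_bytes_to_reg(unsigned int q_fifo_size)
	{
		unsigned int p_fifo = q_fifo_size / 256;

		if (p_fifo)
			p_fifo--;

		return p_fifo;
	}

	int main(void)
	{
		unsigned int fifo_size = 81920;	/* total fifo RAM, bytes */
		unsigned int queue_count = 8;
		unsigned int reg = fifo_bytes_to_reg(fifo_size / queue_count);

		/* 81920 / 8 = 10240 bytes per queue -> register value 39 */
		printf("reg %u -> %u bytes per queue\n", reg, (reg + 1) * 256);
		return 0;
	}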

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 1babcc1..b3bea63 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -2000,19 +2000,37 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
 	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
 }
 
-static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
-						  unsigned int queue_count)
+static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
 {
-	unsigned int q_fifo_size;
-	unsigned int p_fifo;
+	unsigned int fifo_size;
+
+	/* Calculate the configured fifo size */
+	fifo_size = 1 << (pdata->hw_feat.tx_fifo_size + 7);
+
+	/* The configured value may not be the actual amount of fifo RAM */
+	return min_t(unsigned int, XGMAC_FIFO_TX_MAX, fifo_size);
+}
+
+static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
+{
+	unsigned int fifo_size;
 
 	/* Calculate the configured fifo size */
-	q_fifo_size = 1 << (fifo_size + 7);
+	fifo_size = 1 << (pdata->hw_feat.rx_fifo_size + 7);
 
 	/* The configured value may not be the actual amount of fifo RAM */
-	q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
+	return min_t(unsigned int, XGMAC_FIFO_RX_MAX, fifo_size);
+}
+
+static void xgbe_calculate_equal_fifo(unsigned int fifo_size,
+				      unsigned int queue_count,
+				      unsigned int *fifo)
+{
+	unsigned int q_fifo_size;
+	unsigned int p_fifo;
+	unsigned int i;
 
-	q_fifo_size = q_fifo_size / queue_count;
+	q_fifo_size = fifo_size / queue_count;
 
 	/* Each increment in the queue fifo size represents 256 bytes of
 	 * fifo, with 0 representing 256 bytes. Distribute the fifo equally
@@ -2022,39 +2040,46 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
 	if (p_fifo)
 		p_fifo--;
 
-	return p_fifo;
+	for (i = 0; i < queue_count; i++)
+		fifo[i] = p_fifo;
+
+	return;
 }
 
 static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
 {
 	unsigned int fifo_size;
+	unsigned int fifo[XGBE_MAX_QUEUES];
 	unsigned int i;
 
-	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
-						  pdata->tx_q_count);
+	fifo_size = xgbe_get_tx_fifo_size(pdata);
+
+	xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);
 
 	for (i = 0; i < pdata->tx_q_count; i++)
-		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
+		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);
 
 	netif_info(pdata, drv, pdata->netdev,
 		   "%d Tx hardware queues, %d byte fifo per queue\n",
-		   pdata->tx_q_count, ((fifo_size + 1) * 256));
+		   pdata->tx_q_count, ((fifo[0] + 1) * 256));
 }
 
 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
 {
 	unsigned int fifo_size;
+	unsigned int fifo[XGBE_MAX_QUEUES];
 	unsigned int i;
 
-	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
-						  pdata->rx_q_count);
+	fifo_size = xgbe_get_rx_fifo_size(pdata);
+
+	xgbe_calculate_equal_fifo(fifo_size, pdata->rx_q_count, fifo);
 
 	for (i = 0; i < pdata->rx_q_count; i++)
-		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
+		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);
 
 	netif_info(pdata, drv, pdata->netdev,
 		   "%d Rx hardware queues, %d byte fifo per queue\n",
-		   pdata->rx_q_count, ((fifo_size + 1) * 256));
+		   pdata->rx_q_count, ((fifo[0] + 1) * 256));
 }
 
 static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 5dd17dc..d838b44 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -208,7 +208,8 @@
 #define XGMAC_DRIVER_CONTEXT	1
 #define XGMAC_IOCTL_CONTEXT	2
 
-#define XGBE_FIFO_MAX		81920
+#define XGMAC_FIFO_RX_MAX	81920
+#define XGMAC_FIFO_TX_MAX	81920
 
 #define XGBE_TC_MIN_QUANTUM	10
 

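A follow-up note on the size decode (illustrative, outside the patch):
xgbe_get_tx_fifo_size() and xgbe_get_rx_fifo_size() treat the hardware
feature fields as a power-of-two exponent, 1 << (n + 7) = 128 * 2^n
bytes, clamped to the new per-direction maximums.  A self-contained
sketch of that decode, assuming the 81920-byte cap from xgbe.h:

	#include <stdio.h>

	#define XGMAC_FIFO_TX_MAX	81920	/* cap from xgbe.h */

	/* hw_feat.{tx,rx}_fifo_size is an exponent: n means 128 * 2^n bytes */
	static unsigned int decode_fifo_size(unsigned int n, unsigned int max)
	{
		unsigned int fifo_size = 1U << (n + 7);

		/* The configured value may exceed the fifo RAM present */
		return fifo_size < max ? fifo_size : max;
	}

	int main(void)
	{
		unsigned int n;

		for (n = 0; n <= 10; n++)
			printf("encoding %2u -> %6u bytes\n",
			       n, decode_fifo_size(n, XGMAC_FIFO_TX_MAX));
		return 0;
	}

For n = 10 the raw decode is 131072 bytes, which the cap trims to
81920, matching the "configured value may not be the actual amount of
fifo RAM" comment in the patch.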