Message-ID: <1613402622-11451-1-git-send-email-stefanc@marvell.com>
Date: Mon, 15 Feb 2021 17:23:42 +0200
From: <stefanc@...vell.com>
To: <netdev@...r.kernel.org>
CC: <thomas.petazzoni@...tlin.com>, <davem@...emloft.net>,
<nadavh@...vell.com>, <ymarkman@...vell.com>,
<linux-kernel@...r.kernel.org>, <stefanc@...vell.com>,
<kuba@...nel.org>, <linux@...linux.org.uk>, <mw@...ihalf.com>,
<andrew@...n.ch>, <rmk+kernel@...linux.org.uk>,
<atenart@...nel.org>
Subject: [net-next] net: mvpp2: Add TX flow control support for jumbo frames
From: Stefan Chulski <stefanc@...vell.com>

With an MTU of less than 1500B on all ports, the driver uses per-CPU
pool mode. If one of the ports is set to a jumbo frame MTU size, all
ports move to shared pool mode. When that happens, buffer manager TX
flow control is reconfigured on all ports.

Signed-off-by: Stefan Chulski <stefanc@...vell.com>
---
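
Note for reviewers: the per-CPU <-> shared pool switch itself is
driven from the MTU change path. A simplified sketch for context
(not verbatim driver code; the condition and surrounding error
handling are abbreviated, and MVPP2_BM_LONG_PKT_SIZE is used as the
jumbo threshold on my understanding of the existing code):

	static int mvpp2_change_mtu(struct net_device *dev, int mtu)
	{
		struct mvpp2_port *port = netdev_priv(dev);

		/* Crossing the jumbo threshold in per-CPU pool mode
		 * moves every port to shared pools. With this patch,
		 * mvpp2_bm_switch_buffers() also toggles BM TX flow
		 * control around the pool rebuild.
		 */
		if (port->priv->percpu_pools && mtu > MVPP2_BM_LONG_PKT_SIZE)
			mvpp2_bm_switch_buffers(port->priv, false);
		...
	}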
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 26 ++++++++++++++++++++
1 file changed, 26 insertions(+)
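
For review, the ordering this adds to mvpp2_bm_switch_buffers() when
the pool mode actually changes is, in outline (condensed from the
diff below):

	/* Outline only, not compilable as-is:
	 *
	 *   mvpp2_bm_pool_update_priv_fc(priv, false);   FC off
	 *   destroy the old BM pools
	 *   re-init pools in the new mode, reopen the ports
	 *   mvpp2_bm_pool_update_priv_fc(priv, true);    re-apply tx_fc
	 *
	 * Flow control stays disabled across the window in which no
	 * pools exist; on re-enable, each port's tx_fc setting decides
	 * whether FC is actually turned back on for its pools.
	 */
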
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 222e9a3..10c17d1 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -924,6 +924,25 @@ static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
 	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
 }
 
+/* disable/enable flow control for BM pool on all ports */
+static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
+{
+	struct mvpp2_port *port;
+	int i, j;
+
+	for (i = 0; i < priv->port_count; i++) {
+		port = priv->port_list[i];
+		if (port->priv->percpu_pools) {
+			for (j = 0; j < port->nrxqs; j++)
+				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
+							port->tx_fc & en);
+		} else {
+			mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
+			mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
+		}
+	}
+}
+
 static int mvpp2_enable_global_fc(struct mvpp2 *priv)
 {
 	int val, timeout = 0;
@@ -4913,6 +4932,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
  */
 static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
 {
+	bool change_percpu = (percpu != priv->percpu_pools);
 	int numbufs = MVPP2_BM_POOLS_NUM, i;
 	struct mvpp2_port *port = NULL;
 	bool status[MVPP2_MAX_PORTS];
@@ -4928,6 +4948,9 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
 	if (priv->percpu_pools)
 		numbufs = port->nrxqs * 2;
 
+	if (change_percpu)
+		mvpp2_bm_pool_update_priv_fc(priv, false);
+
 	for (i = 0; i < numbufs; i++)
 		mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
 
@@ -4942,6 +4965,9 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
 		if (status[i])
 			mvpp2_open(port->dev);
 	}
 
+	if (change_percpu)
+		mvpp2_bm_pool_update_priv_fc(priv, true);
+
 	return 0;
 }
--
1.9.1