Message-ID: <1613040542-16500-10-git-send-email-stefanc@marvell.com>
Date: Thu, 11 Feb 2021 12:48:56 +0200
From: <stefanc@...vell.com>
To: <netdev@...r.kernel.org>
CC: <thomas.petazzoni@...tlin.com>, <davem@...emloft.net>,
<nadavh@...vell.com>, <ymarkman@...vell.com>,
<linux-kernel@...r.kernel.org>, <stefanc@...vell.com>,
<kuba@...nel.org>, <linux@...linux.org.uk>, <mw@...ihalf.com>,
<andrew@...n.ch>, <rmk+kernel@...linux.org.uk>,
<atenart@...nel.org>, <devicetree@...r.kernel.org>,
<robh+dt@...nel.org>, <sebastian.hesselbarth@...il.com>,
<gregory.clement@...tlin.com>,
<linux-arm-kernel@...ts.infradead.org>
Subject: [PATCH v13 net-next 09/15] net: mvpp2: enable global flow control
From: Stefan Chulski <stefanc@...vell.com>
This patch enables global flow control in the CM3 firmware and advertises Pause support in the phylink validate mask.
Signed-off-by: Stefan Chulski <stefanc@...vell.com>
Acked-by: Marcin Wojtas <mw@...ihalf.com>
---
drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 11 +++++--
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 30 +++++++++++++++++++-
2 files changed, 37 insertions(+), 4 deletions(-)
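
For reference, a minimal sketch of the enable sequence performed at probe time, using the CM3 window helpers introduced by this patch (mvpp2_cm3_read()/mvpp2_cm3_write()). This is an illustrative fragment assuming the driver context (struct mvpp2 with cm3_base already mapped), not a standalone program, and the wrapper name mvpp2_enable_global_tx_fc() is hypothetical:

static void mvpp2_enable_global_tx_fc(struct mvpp2 *priv)
{
	u32 val;

	/* Read-modify-write the MSS common register so the CM3 firmware
	 * starts handling flow control globally; per-port enablement is
	 * handled separately.
	 */
	val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
}
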
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index d2cc513c..8945fb9 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -763,9 +763,11 @@
((kb) * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
/* MSS Flow control */
-#define FC_QUANTA 0xFFFF
-#define FC_CLK_DIVIDER 100
-#define MSS_THRESHOLD_STOP 768
+#define MSS_FC_COM_REG 0
+#define FLOW_CONTROL_ENABLE_BIT BIT(0)
+#define FC_QUANTA 0xFFFF
+#define FC_CLK_DIVIDER 100
+#define MSS_THRESHOLD_STOP 768
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
@@ -1017,6 +1019,9 @@ struct mvpp2 {
/* page_pool allocator */
struct page_pool *page_pool[MVPP2_PORT_MAX_RXQ];
+
+ /* Global TX Flow Control config */
+ bool global_tx_fc;
};
struct mvpp2_pcpu_stats {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 8b4073c..027101b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -91,6 +91,16 @@ static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
return cpu % priv->nthreads;
}
+static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
+{
+ writel(data, priv->cm3_base + offset);
+}
+
+static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
+{
+ return readl(priv->cm3_base + offset);
+}
+
static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
enum dma_data_direction dma_dir)
@@ -5950,6 +5960,11 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, Autoneg);
phylink_set_port_modes(mask);
+ if (port->priv->global_tx_fc) {
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+ }
+
switch (state->interface) {
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
@@ -6951,7 +6966,7 @@ static int mvpp2_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *base;
int i, shared;
- int err;
+ int err, val;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -7003,6 +7018,10 @@ static int mvpp2_probe(struct platform_device *pdev)
err = mvpp2_get_sram(pdev, priv);
if (err)
dev_warn(&pdev->dev, "Fail to alloc CM3 SRAM\n");
+
+	/* Enable global flow control only if the CM3 SRAM handle is not NULL */
+ if (priv->cm3_base)
+ priv->global_tx_fc = true;
}
if (priv->hw_version != MVPP21 && dev_of_node(&pdev->dev)) {
@@ -7168,6 +7187,15 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_port_probe;
}
+	/* Enable global flow control. At this stage, flow control is
+	 * enabled globally but still disabled per port.
+	 */
+ if (priv->global_tx_fc && priv->hw_version != MVPP21) {
+ val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
+ }
+
mvpp2_dbgfs_init(priv, pdev->name);
platform_set_drvdata(pdev, priv);
--
1.9.1