Message-Id: <1460051876-53135-5-git-send-email-blogic@openwrt.org>
Date: Thu, 7 Apr 2016 19:57:52 +0200
From: John Crispin <blogic@...nwrt.org>
To: "David S. Miller" <davem@...emloft.net>
Cc: Felix Fietkau <nbd@...nwrt.org>,
Matthias Brugger <matthias.bgg@...il.com>,
Sean Wang (王志亘)
<sean.wang@...iatek.com>, netdev@...r.kernel.org,
linux-mediatek@...ts.infradead.org, linux-kernel@...r.kernel.org,
John Crispin <blogic@...nwrt.org>
Subject: [PATCH 5/9] net: mediatek: fix stop and wakeup of queue
The driver supports 2 MACs. Both run on the same DMA ring. If we go
above/below the TX ring's threshold value, we always need to wake/stop
the queues of both devices. Not doing so can cause TX stalls and packet
drops on one of the devices.
Signed-off-by: John Crispin <blogic@...nwrt.org>
---
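Note for reviewers: a minimal stand-alone sketch of the pattern the new
helpers follow. The struct, the stub netif_* functions and main() below
are illustrative stand-ins invented for this note, not driver code; only
MTK_MAC_COUNT, the netdev[] array and the NULL check mirror the patch.
The point is that both queues are always stopped or woken together,
skipping any MAC that has no net device registered.

	/* Illustrative only: userspace model of stop/wake-both-queues. */
	#include <stdio.h>
	#include <stddef.h>

	#define MTK_MAC_COUNT 2

	struct fake_netdev { const char *name; };	/* stand-in for struct net_device */

	static void netif_stop_queue(struct fake_netdev *dev)
	{
		printf("%s: queue stopped\n", dev->name);
	}

	static void netif_wake_queue(struct fake_netdev *dev)
	{
		printf("%s: queue woken\n", dev->name);
	}

	/* Both MACs share one TX DMA ring, so act on every registered device. */
	static void stop_all(struct fake_netdev *netdev[MTK_MAC_COUNT])
	{
		int i;

		for (i = 0; i < MTK_MAC_COUNT; i++) {
			if (!netdev[i])
				continue;
			netif_stop_queue(netdev[i]);
		}
	}

	static void wake_all(struct fake_netdev *netdev[MTK_MAC_COUNT])
	{
		int i;

		for (i = 0; i < MTK_MAC_COUNT; i++) {
			if (!netdev[i])
				continue;
			netif_wake_queue(netdev[i]);
		}
	}

	int main(void)
	{
		struct fake_netdev eth0 = { "eth0" };
		struct fake_netdev *netdev[MTK_MAC_COUNT] = { &eth0, NULL };

		stop_all(netdev);	/* ring nearly full: stop every queue */
		wake_all(netdev);	/* ring drained past thresh: wake them all */
		return 0;
	}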
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 37 +++++++++++++++++++--------
1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index a4982e4..4ebc42e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -684,6 +684,28 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
return nfrags;
}
+static void mtk_wake_queue(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ netif_wake_queue(eth->netdev[i]);
+ }
+}
+
+static void mtk_stop_queue(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ netif_stop_queue(eth->netdev[i]);
+ }
+}
+
static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -695,7 +717,7 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_num = mtk_cal_txd_req(skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
- netif_stop_queue(dev);
+ mtk_stop_queue(eth);
netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
@@ -720,10 +742,10 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
- netif_stop_queue(dev);
+ mtk_stop_queue(eth);
if (unlikely(atomic_read(&ring->free_count) >
ring->thresh))
- netif_wake_queue(dev);
+ mtk_wake_queue(eth);
}
return NETDEV_TX_OK;
@@ -897,13 +919,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
if (!total)
return 0;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->netdev[i] ||
- unlikely(!netif_queue_stopped(eth->netdev[i])))
- continue;
- if (atomic_read(&ring->free_count) > ring->thresh)
- netif_wake_queue(eth->netdev[i]);
- }
+ if (atomic_read(&ring->free_count) > ring->thresh)
+ mtk_wake_queue(eth);
return total;
}
--
1.7.10.4