Message-Id: <20180219171116.15326-4-niklas.cassel@axis.com>
Date: Mon, 19 Feb 2018 18:11:11 +0100
From: Niklas Cassel <niklas.cassel@...s.com>
To: Giuseppe Cavallaro <peppe.cavallaro@...com>,
Alexandre Torgue <alexandre.torgue@...com>
Cc: Jose.Abreu@...opsys.com, vbhadram@...dia.com,
Niklas Cassel <niklass@...s.com>, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH net-next 3/7] net: stmmac: WARN if tx_skbuff entries are reused before cleared
The current code assumes that a tx_skbuff entry has been cleared
by stmmac_tx_clean() before stmmac_xmit()/stmmac_tso_xmit()
assigns a new skb to that entry. However, since we never check
the current value before overwriting it, it is theoretically
possible that a non-NULL value is overwritten.
Add WARN_ONs to verify that each entry in tx_skbuff is NULL
before it is assigned a new value.
Signed-off-by: Niklas Cassel <niklas.cassel@...s.com>
---
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5 +++++
1 file changed, 5 insertions(+)
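
As context for reviewers, here is a minimal standalone sketch of the
invariant the WARN_ONs check (plain userspace C; the names tx_ring,
ring_xmit and ring_clean are made up for illustration and are not the
driver's structures): a ring slot must have been set to NULL by the
completion path, stmmac_tx_clean(), before the xmit path stores a new
skb pointer in it.

#include <stdio.h>

#define SLOTS 4

static void *tx_ring[SLOTS];	/* stands in for tx_q->tx_skbuff[] */
static unsigned int cur_tx;	/* stands in for tx_q->cur_tx */

/* xmit path: claim the next slot and store the skb pointer. */
static void ring_xmit(void *skb)
{
	cur_tx = (cur_tx + 1) % SLOTS;	/* like STMMAC_GET_ENTRY() */

	/* Same idea as the added WARN_ON(): the slot should be empty. */
	if (tx_ring[cur_tx])
		fprintf(stderr, "WARN: slot %u reused before cleared\n", cur_tx);

	tx_ring[cur_tx] = skb;
}

/* completion path: release a slot, as stmmac_tx_clean() does. */
static void ring_clean(unsigned int entry)
{
	tx_ring[entry] = NULL;
}

int main(void)
{
	int fake_skb = 0;

	ring_xmit(&fake_skb);	/* fills slot 1 */
	ring_clean(1);		/* completion ran, slot 1 may be reused */
	ring_xmit(&fake_skb);	/* fills slot 2, never cleaned */
	ring_xmit(&fake_skb);	/* slot 3 */
	ring_xmit(&fake_skb);	/* slot 0 */
	ring_xmit(&fake_skb);	/* slot 1 again, was cleaned: no warning */
	ring_xmit(&fake_skb);	/* slot 2 again, still set: prints warning */

	return 0;
}

When built with gcc, only the last ring_xmit() call prints the warning,
since slot 2 was never cleaned; that is the situation the WARN_ONs
would flag in the driver.
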
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 24afe7733cde..9881df126227 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2794,6 +2794,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
 
 	while (tmp_len > 0) {
 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
 		desc = tx_q->dma_tx + tx_q->cur_tx;
 
 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
@@ -2878,6 +2879,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 		priv->hw->desc->set_mss(mss_desc, mss);
 		tx_q->mss = mss;
 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
 	}
 
 	if (netif_msg_tx_queued(priv)) {
@@ -2888,6 +2890,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	first_entry = tx_q->cur_tx;
+	WARN_ON(tx_q->tx_skbuff[first_entry]);
 
 	desc = tx_q->dma_tx + first_entry;
 	first = desc;
@@ -3062,6 +3065,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	entry = tx_q->cur_tx;
 	first_entry = entry;
+	WARN_ON(tx_q->tx_skbuff[first_entry]);
 
 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
 
@@ -3090,6 +3094,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		bool last_segment = (i == (nfrags - 1));
 
 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+		WARN_ON(tx_q->tx_skbuff[entry]);
 
 		if (likely(priv->extend_desc))
 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
--
2.14.2