Message-Id: <20220512171853.4100193-2-robert.hancock@calian.com>
Date: Thu, 12 May 2022 11:18:52 -0600
From: Robert Hancock <robert.hancock@...ian.com>
To: netdev@...r.kernel.org
Cc: radhey.shyam.pandey@...inx.com, davem@...emloft.net,
edumazet@...gle.com, kuba@...nel.org, pabeni@...hat.com,
michal.simek@...inx.com, linux-arm-kernel@...ts.infradead.org,
Robert Hancock <robert.hancock@...ian.com>
Subject: [PATCH net-next v7 1/2] net: axienet: Be more careful about updating tx_bd_tail

The axienet_start_xmit function was updating the tx_bd_tail variable
multiple times, with potential rollbacks on error or invalid
intermediate positions, even though this variable is also read in the
TX completion path. Work on a local copy of the tail pointer instead,
using READ_ONCE where the shared variable is read and WRITE_ONCE where
it is written so that the new value is published in a single store, and
move that write before the MMIO write that starts the transfer, so it
is protected by the MMIO accessor's implicit write barrier.

Signed-off-by: Robert Hancock <robert.hancock@...ian.com>
---
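
For reviewers, a minimal sketch of the single-writer index pattern this
patch moves to. The names below (ring, kick_dma(), space_left()) are
hypothetical stand-ins, not the driver's code: the xmit path advances a
local copy of the tail index and publishes it with a single
WRITE_ONCE() before ringing the doorbell, while any concurrent reader
snapshots it with READ_ONCE().

/* Sketch only; READ_ONCE()/WRITE_ONCE() are the kernel helpers from
 * <linux/compiler.h>, everything else here is a made-up stand-in.
 */
struct ring {
	unsigned int tail;	/* written only by the xmit path */
	unsigned int num_desc;
};

/* Hypothetical stand-ins for the MMIO doorbell write and the
 * descriptor free-space computation.
 */
static void kick_dma(struct ring *r);
static unsigned int space_left(const struct ring *r, unsigned int tail);

static void ring_submit(struct ring *r, unsigned int frags)
{
	unsigned int tail = r->tail;	/* single writer: plain read */

	/* Fill "frags" descriptors, advancing only the local copy, so a
	 * mapping error midway needs no rollback of the shared index.
	 */
	tail = (tail + frags) % r->num_desc;

	/* Publish the new tail in one store; a reader never observes an
	 * intermediate or rolled-back position.
	 */
	WRITE_ONCE(r->tail, tail);

	/* kick_dma() stands in for the MMIO tail-pointer write; the MMIO
	 * accessor's implicit write barrier orders the store above ahead
	 * of the completion interrupt the device may raise.
	 */
	kick_dma(r);
}

/* Reader side, e.g. a free-space check racing with xmit. */
static bool ring_full(const struct ring *r, unsigned int need)
{
	unsigned int tail = READ_ONCE(r->tail);	/* one consistent snapshot */

	return space_left(r, tail) < need;
}
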
 .../net/ethernet/xilinx/xilinx_axienet_main.c | 26 +++++++++++--------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index d6fc3f7acdf0..1e0f70e53991 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -746,7 +746,8 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 
 	/* Ensure we see all descriptor updates from device or TX IRQ path */
 	rmb();
-	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
+	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
+			     lp->tx_bd_num];
 	if (cur_p->cntrl)
 		return NETDEV_TX_BUSY;
 	return 0;
@@ -807,12 +808,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	u32 csum_index_off;
 	skb_frag_t *frag;
 	dma_addr_t tail_p, phys;
+	u32 orig_tail_ptr, new_tail_ptr;
 	struct axienet_local *lp = netdev_priv(ndev);
 	struct axidma_bd *cur_p;
-	u32 orig_tail_ptr = lp->tx_bd_tail;
+
+	orig_tail_ptr = lp->tx_bd_tail;
+	new_tail_ptr = orig_tail_ptr;
 
 	num_frag = skb_shinfo(skb)->nr_frags;
-	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+	cur_p = &lp->tx_bd_v[orig_tail_ptr];
 
 	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
 		/* Should not happen as last start_xmit call should have
@@ -852,9 +856,9 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 
 	for (ii = 0; ii < num_frag; ii++) {
-		if (++lp->tx_bd_tail >= lp->tx_bd_num)
-			lp->tx_bd_tail = 0;
-		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+		if (++new_tail_ptr >= lp->tx_bd_num)
+			new_tail_ptr = 0;
+		cur_p = &lp->tx_bd_v[new_tail_ptr];
 		frag = &skb_shinfo(skb)->frags[ii];
 		phys = dma_map_single(lp->dev,
 				      skb_frag_address(frag),
@@ -866,8 +870,6 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			ndev->stats.tx_dropped++;
 			axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
 					      NULL);
-			lp->tx_bd_tail = orig_tail_ptr;
-
 			return NETDEV_TX_OK;
 		}
 		desc_set_phys_addr(lp, phys, cur_p);
@@ -877,11 +879,13 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
 	cur_p->skb = skb;
 
-	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
+	if (++new_tail_ptr >= lp->tx_bd_num)
+		new_tail_ptr = 0;
+	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+
 	/* Start the transfer */
 	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
-	if (++lp->tx_bd_tail >= lp->tx_bd_num)
-		lp->tx_bd_tail = 0;
 
 	/* Stop queue if next transmit may not have space */
 	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
--
2.31.1