Message-ID: <20250924054704.2795474-1-jianpeng.chang.cn@windriver.com>
Date: Wed, 24 Sep 2025 13:47:04 +0800
From: Jianpeng Chang <jianpeng.chang.cn@...driver.com>
To: <claudiu.manoil@....com>, <vladimir.oltean@....com>, <wei.fang@....com>,
<xiaoning.wang@....com>, <andrew+netdev@...n.ch>,
<davem@...emloft.net>, <edumazet@...gle.com>, <kuba@...nel.org>,
<pabeni@...hat.com>, <alexandru.marginean@....com>
CC: <imx@...ts.linux.dev>, <netdev@...r.kernel.org>,
<linux-kernel@...r.kernel.org>,
Jianpeng Chang <jianpeng.chang.cn@...driver.com>
Subject: [PATCH] net: enetc: fix the deadlock of enetc_mdio_lock

After applying the workaround for erratum err050089, the LS1028A platform
experiences RCU stalls on RT kernels. The stalls are caused by recursive
acquisition of the read lock enetc_mdio_lock. Listed below are some of
the call paths identified under enetc_poll that can lead to the deadlock:

enetc_poll
  -> enetc_lock_mdio
  -> enetc_clean_rx_ring OR napi_complete_done
    -> napi_gro_receive
      -> enetc_start_xmit
        -> enetc_lock_mdio
        -> enetc_map_tx_buffs
        -> enetc_unlock_mdio
  -> enetc_unlock_mdio

After enetc_poll acquires the read lock, a higher-priority writer attempts
to take the lock and preempts the reader. The writer sees that a read lock
is already held and schedules out to wait. From then on, the readers under
enetc_poll can no longer re-acquire the read lock, because a writer is
pending, and the thread hangs.
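
For illustration only (not part of the patch): the same hazard can be
reproduced in userspace with a writer-preferring POSIX rwlock. The
program below is a minimal sketch assuming glibc's non-portable rwlock
attributes; once the writer is queued, the second rdlock in the same
thread hangs, just like the recursive enetc_lock_mdio() above.

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t lock;

static void *writer(void *arg)
{
	/* Queues behind the reader in main() and waits for it. */
	pthread_rwlock_wrlock(&lock);
	pthread_rwlock_unlock(&lock);
	return arg;
}

int main(void)
{
	pthread_rwlockattr_t attr;
	pthread_t tid;

	pthread_rwlockattr_init(&attr);
	/* Prefer queued writers over new readers, analogous to the
	 * writer-fair rwlock behavior on the RT kernel. */
	pthread_rwlockattr_setkind_np(&attr,
			PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
	pthread_rwlock_init(&lock, &attr);

	pthread_rwlock_rdlock(&lock);	/* outer read lock (enetc_poll) */
	pthread_create(&tid, NULL, writer, NULL);
	sleep(1);			/* let the writer queue up */
	pthread_rwlock_rdlock(&lock);	/* recursive read lock: hangs here */

	puts("not reached");
	return 0;
}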
Fix this by adjusting the scope of enetc_lock_mdio()/enetc_unlock_mdio()
so that the read lock is released before napi_gro_receive() is called and
re-acquired afterwards, which prevents the recursive acquisition (see the
illustrative userspace sketch after the diffstat below).

Fixes: fd5736bf9f23 ("enetc: Workaround for MDIO register access issue")
Signed-off-by: Jianpeng Chang <jianpeng.chang.cn@...driver.com>
---
drivers/net/ethernet/freescale/enetc/enetc.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
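
For reference, the adjusted pattern in the same userspace sketch (again
illustrative, not driver code; deliver_frame() is a made-up stand-in for
napi_gro_receive()): dropping the read lock across the up-call lets the
queued writer run, and the nested read acquisition no longer deadlocks.

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t lock;

/* Stand-in for napi_gro_receive() -> enetc_start_xmit(): the callee
 * takes the read lock itself, so the caller must not hold it. */
static void deliver_frame(void)
{
	pthread_rwlock_rdlock(&lock);
	pthread_rwlock_unlock(&lock);
}

static void *writer(void *arg)
{
	pthread_rwlock_wrlock(&lock);
	pthread_rwlock_unlock(&lock);
	return arg;
}

int main(void)
{
	pthread_rwlockattr_t attr;
	pthread_t tid;

	pthread_rwlockattr_init(&attr);
	pthread_rwlockattr_setkind_np(&attr,
			PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
	pthread_rwlock_init(&lock, &attr);

	pthread_rwlock_rdlock(&lock);	/* per-frame work under the lock */
	pthread_create(&tid, NULL, writer, NULL);
	sleep(1);			/* a writer is now queued */

	pthread_rwlock_unlock(&lock);	/* drop the lock before the up-call */
	deliver_frame();		/* nested read lock, taken safely */
	pthread_rwlock_rdlock(&lock);	/* re-acquire and continue the loop */
	pthread_rwlock_unlock(&lock);

	pthread_join(tid, NULL);
	puts("completed without deadlock");
	return 0;
}
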
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index e4287725832e..164d2e9ec68c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1558,6 +1558,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
 	/* next descriptor to process */
 	i = rx_ring->next_to_clean;
 
+	enetc_lock_mdio();
+
 	while (likely(rx_frm_cnt < work_limit)) {
 		union enetc_rx_bd *rxbd;
 		struct sk_buff *skb;
@@ -1593,7 +1595,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
 		rx_byte_cnt += skb->len + ETH_HLEN;
 		rx_frm_cnt++;
 
+		enetc_unlock_mdio();
 		napi_gro_receive(napi, skb);
+		enetc_lock_mdio();
 	}
 
 	rx_ring->next_to_clean = i;
@@ -1601,6 +1605,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
 	rx_ring->stats.packets += rx_frm_cnt;
 	rx_ring->stats.bytes += rx_byte_cnt;
 
+	enetc_unlock_mdio();
 	return rx_frm_cnt;
 }
 
@@ -1910,6 +1915,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 	/* next descriptor to process */
 	i = rx_ring->next_to_clean;
 
+	enetc_lock_mdio();
+
 	while (likely(rx_frm_cnt < work_limit)) {
 		union enetc_rx_bd *rxbd, *orig_rxbd;
 		struct xdp_buff xdp_buff;
@@ -1973,7 +1980,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 			 */
 			enetc_bulk_flip_buff(rx_ring, orig_i, i);
 
+			enetc_unlock_mdio();
 			napi_gro_receive(napi, skb);
+			enetc_lock_mdio();
 			break;
 		case XDP_TX:
 			tx_ring = priv->xdp_tx_ring[rx_ring->index];
@@ -2038,6 +2047,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 	enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
 			     rx_ring->xdp.xdp_tx_in_flight);
 
+	enetc_unlock_mdio();
 	return rx_frm_cnt;
 }
 
@@ -2056,6 +2066,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
 	for (i = 0; i < v->count_tx_rings; i++)
 		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
 			complete = false;
+	enetc_unlock_mdio();
 
 	prog = rx_ring->xdp.prog;
 	if (prog)
@@ -2068,7 +2079,6 @@ static int enetc_poll(struct napi_struct *napi, int budget)
 	v->rx_napi_work = true;
 
 	if (!complete) {
-		enetc_unlock_mdio();
 		return budget;
 	}
 
@@ -2079,6 +2089,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
 
 	v->rx_napi_work = false;
 
+	enetc_lock_mdio();
 	/* enable interrupts */
 	enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
 
--
2.51.0