Message-ID: <20251208102236.185367-1-theo.lebrun@bootlin.com>
Date: Mon, 8 Dec 2025 11:22:36 +0100
From: Théo Lebrun <theo.lebrun@...tlin.com>
To: theo.lebrun@...tlin.com
Cc: andrew+netdev@...n.ch,
benoit.monin@...tlin.com,
claudiu.beznea@...on.dev,
davem@...emloft.net,
edumazet@...gle.com,
gregory.clement@...tlin.com,
kuba@...nel.org,
lorenzo@...nel.org,
netdev@...r.kernel.org,
nicolas.ferre@...rochip.com,
pabeni@...hat.com,
pvalerio@...hat.com,
thomas.petazzoni@...tlin.com
Subject: [PATCH 1/8] net: macb: move Rx buffers alloc from link up to open

mog_alloc_rx_buffers(), called at open, does not do the actual Rx
buffer allocation on GEM. The bulk of the work is done by
gem_rx_refill(), which fills all ring slots with valid buffers.

gem_rx_refill() is called at link up, by
gem_init_rings() == bp->macbgem_ops.mog_init_rings().
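
A rough sketch of the call flow before this patch (simplified;
function names as in the driver):

    macb_open()
      macb_alloc_consistent()
        mog_alloc_rx_buffers()      /* no buffer alloc on GEM */
    ...
    macb_mac_link_up()
      mog_init_rings() == gem_init_rings()
        gem_rx_refill()             /* the actual Rx buffer alloc */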

Move the operation to macb_open(), mostly to allow it to fail early
and loudly rather than initializing the device with Rx mostly broken.

About `bool fail_early` (see the sketch of both call sites below):
- When called from macb_open(), ring init fails as soon as a queue
  cannot be refilled.
- When called from macb_hresp_error_task(), we do our best to reinit
  the device: we still iterate over all queues and try refilling each
  one even if a previous queue failed.
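
Concretely, the two call sites end up as follows (sketch matching the
hunks below):

    /* macb_alloc_consistent(), on the macb_open() path: fail early */
    if (bp->macbgem_ops.mog_init_rings(bp, true))
        goto out_err;

    /* macb_hresp_error_task(): best effort, return value ignored */
    bp->macbgem_ops.mog_init_rings(bp, false);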

Signed-off-by: Théo Lebrun <theo.lebrun@...tlin.com>
---
drivers/net/ethernet/cadence/macb.h | 2 +-
drivers/net/ethernet/cadence/macb_main.c | 34 ++++++++++++++++++------
2 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 87414a2ddf6e..2cb65ec37d44 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1180,7 +1180,7 @@ struct macb_queue;
struct macb_or_gem_ops {
int (*mog_alloc_rx_buffers)(struct macb *bp);
void (*mog_free_rx_buffers)(struct macb *bp);
- void (*mog_init_rings)(struct macb *bp);
+ int (*mog_init_rings)(struct macb *bp, bool fail_early);
int (*mog_rx)(struct macb_queue *queue, struct napi_struct *napi,
int budget);
};
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index e461f5072884..65431a7e3533 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -705,10 +705,9 @@ static void macb_mac_link_up(struct phylink_config *config,
if (rx_pause)
ctrl |= MACB_BIT(PAE);
- /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
- * cleared the pipeline and control registers.
+ /* Initialize buffer registers as clearing MACB_BIT(TE) in link
+ * down cleared the pipeline and control registers.
*/
- bp->macbgem_ops.mog_init_rings(bp);
macb_init_buffers(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
@@ -1250,13 +1249,14 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
return packets;
}
-static void gem_rx_refill(struct macb_queue *queue)
+static int gem_rx_refill(struct macb_queue *queue)
{
unsigned int entry;
struct sk_buff *skb;
dma_addr_t paddr;
struct macb *bp = queue->bp;
struct macb_dma_desc *desc;
+ int err = 0;
while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
bp->rx_ring_size) > 0) {
@@ -1273,6 +1273,7 @@ static void gem_rx_refill(struct macb_queue *queue)
if (unlikely(!skb)) {
netdev_err(bp->dev,
"Unable to allocate sk_buff\n");
+ err = -ENOMEM;
break;
}
@@ -1322,6 +1323,7 @@ static void gem_rx_refill(struct macb_queue *queue)
netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
queue, queue->rx_prepared_head, queue->rx_tail);
+ return err;
}
/* Mark DMA descriptors from begin up to and not including end as unused */
@@ -1774,7 +1776,7 @@ static void macb_hresp_error_task(struct work_struct *work)
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- bp->macbgem_ops.mog_init_rings(bp);
+ bp->macbgem_ops.mog_init_rings(bp, false);
/* Initialize TX and RX buffers */
macb_init_buffers(bp);
@@ -2549,6 +2551,8 @@ static int macb_alloc_consistent(struct macb *bp)
}
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
+ if (bp->macbgem_ops.mog_init_rings(bp, true))
+ goto out_err;
/* Required for tie off descriptor for PM cases */
if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) {
@@ -2580,11 +2584,13 @@ static void macb_init_tieoff(struct macb *bp)
desc->ctrl = 0;
}
-static void gem_init_rings(struct macb *bp)
+static int gem_init_rings(struct macb *bp, bool fail_early)
{
struct macb_queue *queue;
struct macb_dma_desc *desc = NULL;
+ int last_err = 0;
unsigned int q;
+ int err;
int i;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -2600,13 +2606,24 @@ static void gem_init_rings(struct macb *bp)
queue->rx_tail = 0;
queue->rx_prepared_head = 0;
- gem_rx_refill(queue);
+ /* We get called in two cases:
+ * - open: we can propagate alloc errors (so fail early),
+ * - HRESP error: cannot propagate, we attempt to reinit
+ * all queues in case of failure.
+ */
+ err = gem_rx_refill(queue);
+ if (err) {
+ last_err = err;
+ if (fail_early)
+ break;
+ }
}
macb_init_tieoff(bp);
+ return last_err;
}
-static void macb_init_rings(struct macb *bp)
+static int macb_init_rings(struct macb *bp, bool fail_early)
{
int i;
struct macb_dma_desc *desc = NULL;
@@ -2623,6 +2640,7 @@ static void macb_init_rings(struct macb *bp)
desc->ctrl |= MACB_BIT(TX_WRAP);
macb_init_tieoff(bp);
+ return 0;
}
static void macb_reset_hw(struct macb *bp)
--
2.52.0