Message-ID:
<PAXPR04MB8510A564C6DE0459BC7E2A46888AA@PAXPR04MB8510.eurprd04.prod.outlook.com>
Date: Sat, 17 Jan 2026 02:16:18 +0000
From: Wei Fang <wei.fang@....com>
To: Frank Li <frank.li@....com>
CC: Shenwei Wang <shenwei.wang@....com>, Clark Wang <xiaoning.wang@....com>,
"andrew+netdev@...n.ch" <andrew+netdev@...n.ch>, "davem@...emloft.net"
<davem@...emloft.net>, "edumazet@...gle.com" <edumazet@...gle.com>,
"kuba@...nel.org" <kuba@...nel.org>, "pabeni@...hat.com" <pabeni@...hat.com>,
"ast@...nel.org" <ast@...nel.org>, "daniel@...earbox.net"
<daniel@...earbox.net>, "hawk@...nel.org" <hawk@...nel.org>,
"john.fastabend@...il.com" <john.fastabend@...il.com>, "sdf@...ichev.me"
<sdf@...ichev.me>, "netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"imx@...ts.linux.dev" <imx@...ts.linux.dev>, "bpf@...r.kernel.org"
<bpf@...r.kernel.org>
Subject: RE: [PATCH v2 net-next 12/14] net: fec: add
fec_alloc_rxq_buffers_pp() to allocate buffers from page pool
> On Fri, Jan 16, 2026 at 03:40:25PM +0800, Wei Fang wrote:
> > Currently, the RX queue buffers are allocated from the page pool. In
> > the subsequent patches adding XDP zero-copy support, the RX buffers
> > will instead be allocated from the UMEM. Therefore, extract
> > fec_alloc_rxq_buffers_pp() from fec_enet_alloc_rxq_buffers(); another
> > helper that allocates RX buffers from the UMEM will be added later for
> > the XDP zero-copy mode.
> >
> > Signed-off-by: Wei Fang <wei.fang@....com>
> > ---
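
For context, the point of the split is that fec_enet_alloc_rxq_buffers() can
later pick the allocator per queue. A rough sketch of how the follow-up
patches are expected to use it (fec_alloc_rxq_buffers_xsk() and the xsk_pool
check are placeholders here, the real names come with the zero-copy patches):

static int fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];

	/* Zero-copy path added by the later patches: buffers come from the UMEM. */
	if (rxq->xsk_pool)
		return fec_alloc_rxq_buffers_xsk(fep, rxq);

	/* Default path: buffers come from the page pool. */
	return fec_alloc_rxq_buffers_pp(fep, rxq);
}
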
> > drivers/net/ethernet/freescale/fec_main.c | 78 ++++++++++++++++-------
> > 1 file changed, 54 insertions(+), 24 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
> > index a418f0153d43..68aa94dd9487 100644
> > --- a/drivers/net/ethernet/freescale/fec_main.c
> > +++ b/drivers/net/ethernet/freescale/fec_main.c
> > @@ -3435,6 +3435,24 @@ static void fec_xdp_rxq_info_unreg(struct fec_enet_priv_rx_q *rxq)
> > }
> > }
> >
> > +static void fec_free_rxq_buffers(struct fec_enet_priv_rx_q *rxq)
> > +{
> > + int i;
> > +
> > + for (i = 0; i < rxq->bd.ring_size; i++) {
> > + struct page *page = rxq->rx_buf[i];
> > +
> > + if (!page)
> > + continue;
> > +
> > + page_pool_put_full_page(rxq->page_pool, page, false);
> > + rxq->rx_buf[i] = NULL;
> > + }
> > +
> > + page_pool_destroy(rxq->page_pool);
> > + rxq->page_pool = NULL;
> > +}
> > +
> > static void fec_enet_free_buffers(struct net_device *ndev)
> > {
> > struct fec_enet_private *fep = netdev_priv(ndev);
> > @@ -3448,16 +3466,10 @@ static void fec_enet_free_buffers(struct net_device *ndev)
> > rxq = fep->rx_queue[q];
> >
> > fec_xdp_rxq_info_unreg(rxq);
> > -
> > - for (i = 0; i < rxq->bd.ring_size; i++)
> > - page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
> > - false);
> > + fec_free_rxq_buffers(rxq);
> >
> > for (i = 0; i < XDP_STATS_TOTAL; i++)
> > rxq->stats[i] = 0;
> > -
> > - page_pool_destroy(rxq->page_pool);
> > - rxq->page_pool = NULL;
> > }
> >
> > for (q = 0; q < fep->num_tx_queues; q++) {
> > @@ -3556,22 +3568,18 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
> > return ret;
> > }
> >
> > -static int
> > -fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
> > +static int fec_alloc_rxq_buffers_pp(struct fec_enet_private *fep,
> > + struct fec_enet_priv_rx_q *rxq)
> > {
> > - struct fec_enet_private *fep = netdev_priv(ndev);
> > - struct fec_enet_priv_rx_q *rxq;
> > + struct bufdesc *bdp = rxq->bd.base;
> > dma_addr_t phys_addr;
> > - struct bufdesc *bdp;
> > struct page *page;
> > int i, err;
> >
> > - rxq = fep->rx_queue[queue];
> > - bdp = rxq->bd.base;
> > -
> > err = fec_enet_create_page_pool(fep, rxq);
> > if (err < 0) {
> > - netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
> > + netdev_err(fep->netdev, "%s failed queue %d (%d)\n",
> > + __func__, rxq->bd.qid, err);
> > return err;
> > }
> >
> > @@ -3590,8 +3598,10 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
> >
> > for (i = 0; i < rxq->bd.ring_size; i++) {
> > page = page_pool_dev_alloc_pages(rxq->page_pool);
> > - if (!page)
> > - goto err_alloc;
> > + if (!page) {
> > + err = -ENOMEM;
> > + goto free_rx_buffers;
>
> It looks like this part is a bug fix: setting err to -ENOMEM was missed before.
>
This is not a bug fix; the previous logic returned -ENOMEM directly
at the err_alloc label, see below:

err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
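
To make the comparison explicit, a simplified side-by-side of the two error
paths (the free_rx_buffers tail is abbreviated here, not the exact hunk):

	/* Old code: a single bail-out with a hard-coded return value. */
	if (!page)
		goto err_alloc;
	...
err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;

	/* New code: the label returns 'err', so -ENOMEM is set at the call site. */
	if (!page) {
		err = -ENOMEM;
		goto free_rx_buffers;
	}
	...
free_rx_buffers:
	fec_free_rxq_buffers(rxq);
	return err;

So the behavior is unchanged; only where the error value is assigned moves.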