Message-ID: <aWpNcsoiQX7WESis@lizhi-Precision-Tower-5810>
Date: Fri, 16 Jan 2026 09:38:42 -0500
From: Frank Li <Frank.li@....com>
To: Wei Fang <wei.fang@....com>
Cc: shenwei.wang@....com, xiaoning.wang@....com, andrew+netdev@...n.ch,
davem@...emloft.net, edumazet@...gle.com, kuba@...nel.org,
pabeni@...hat.com, ast@...nel.org, daniel@...earbox.net,
hawk@...nel.org, john.fastabend@...il.com, sdf@...ichev.me,
netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
imx@...ts.linux.dev, bpf@...r.kernel.org
Subject: Re: [PATCH v2 net-next 12/14] net: fec: add
fec_alloc_rxq_buffers_pp() to allocate buffers from page pool
On Fri, Jan 16, 2026 at 03:40:25PM +0800, Wei Fang wrote:
> Currently, the buffers of RX queue are allocated from the page pool. In
> the subsequent patches to support XDP zero copy, the RX buffers will be
> allocated from the UMEM. Therefore, extract fec_alloc_rxq_buffers_pp()
> from fec_enet_alloc_rxq_buffers() and we will add another helper to
> allocate RX buffers from UMEM for the XDP zero copy mode.
>
> Signed-off-by: Wei Fang <wei.fang@....com>
> ---
> drivers/net/ethernet/freescale/fec_main.c | 78 ++++++++++++++++-------
> 1 file changed, 54 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
> index a418f0153d43..68aa94dd9487 100644
> --- a/drivers/net/ethernet/freescale/fec_main.c
> +++ b/drivers/net/ethernet/freescale/fec_main.c
> @@ -3435,6 +3435,24 @@ static void fec_xdp_rxq_info_unreg(struct fec_enet_priv_rx_q *rxq)
> }
> }
>
> +static void fec_free_rxq_buffers(struct fec_enet_priv_rx_q *rxq)
> +{
> + int i;
> +
> + for (i = 0; i < rxq->bd.ring_size; i++) {
> + struct page *page = rxq->rx_buf[i];
> +
> + if (!page)
> + continue;
> +
> + page_pool_put_full_page(rxq->page_pool, page, false);
> + rxq->rx_buf[i] = NULL;
> + }
> +
> + page_pool_destroy(rxq->page_pool);
> + rxq->page_pool = NULL;
> +}
> +
> static void fec_enet_free_buffers(struct net_device *ndev)
> {
> struct fec_enet_private *fep = netdev_priv(ndev);
> @@ -3448,16 +3466,10 @@ static void fec_enet_free_buffers(struct net_device *ndev)
> rxq = fep->rx_queue[q];
>
> fec_xdp_rxq_info_unreg(rxq);
> -
> - for (i = 0; i < rxq->bd.ring_size; i++)
> - page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
> - false);
> + fec_free_rxq_buffers(rxq);
>
> for (i = 0; i < XDP_STATS_TOTAL; i++)
> rxq->stats[i] = 0;
> -
> - page_pool_destroy(rxq->page_pool);
> - rxq->page_pool = NULL;
> }
>
> for (q = 0; q < fep->num_tx_queues; q++) {
> @@ -3556,22 +3568,18 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
> return ret;
> }
>
> -static int
> -fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
> +static int fec_alloc_rxq_buffers_pp(struct fec_enet_private *fep,
> + struct fec_enet_priv_rx_q *rxq)
> {
> - struct fec_enet_private *fep = netdev_priv(ndev);
> - struct fec_enet_priv_rx_q *rxq;
> + struct bufdesc *bdp = rxq->bd.base;
> dma_addr_t phys_addr;
> - struct bufdesc *bdp;
> struct page *page;
> int i, err;
>
> - rxq = fep->rx_queue[queue];
> - bdp = rxq->bd.base;
> -
> err = fec_enet_create_page_pool(fep, rxq);
> if (err < 0) {
> - netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
> + netdev_err(fep->netdev, "%s failed queue %d (%d)\n",
> + __func__, rxq->bd.qid, err);
> return err;
> }
>
> @@ -3590,8 +3598,10 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
>
> for (i = 0; i < rxq->bd.ring_size; i++) {
> page = page_pool_dev_alloc_pages(rxq->page_pool);
> - if (!page)
> - goto err_alloc;
> + if (!page) {
> + err = -ENOMEM;
> + goto free_rx_buffers;
It looks like this part is a bug fix: the original code missed setting err to -ENOMEM.
> + }
>
> phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
> bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
> @@ -3601,6 +3611,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
>
> if (fep->bufdesc_ex) {
> struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
> +
Unnecessary change.
Frank
> ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
> }
>
> @@ -3611,15 +3622,34 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
> bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
> bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
>
> - err = fec_xdp_rxq_info_reg(fep, rxq);
> + return 0;
> +
> +free_rx_buffers:
> + fec_free_rxq_buffers(rxq);
> +
> + return err;
> +}
> +
> +static int
> +fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
> +{
> + struct fec_enet_private *fep = netdev_priv(ndev);
> + struct fec_enet_priv_rx_q *rxq;
> + int err;
> +
> + rxq = fep->rx_queue[queue];
> + err = fec_alloc_rxq_buffers_pp(fep, rxq);
> if (err)
> - goto err_alloc;
> + return err;
>
> - return 0;
> + err = fec_xdp_rxq_info_reg(fep, rxq);
> + if (err) {
> + fec_free_rxq_buffers(rxq);
>
> - err_alloc:
> - fec_enet_free_buffers(ndev);
> - return -ENOMEM;
> + return err;
> + }
> +
> + return 0;
> }
>
> static int
> --
> 2.34.1
>
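Also, just to check I follow the plan for the later zero-copy patches: I assume
fec_enet_alloc_rxq_buffers() will end up dispatching between the page pool helper
added here and the new UMEM helper, roughly like the sketch below. The helper name
fec_alloc_rxq_buffers_xsk() and the rxq->xsk_pool check are only my guesses from
the commit message, not something in this series.

static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
	int err;

	/* Guess: choose the UMEM path when an XSK pool is attached,
	 * otherwise use the page pool helper introduced in this patch.
	 */
	if (rxq->xsk_pool)
		err = fec_alloc_rxq_buffers_xsk(fep, rxq);
	else
		err = fec_alloc_rxq_buffers_pp(fep, rxq);
	if (err)
		return err;

	err = fec_xdp_rxq_info_reg(fep, rxq);
	if (err) {
		/* the free path would presumably need a matching
		 * xsk variant as well
		 */
		fec_free_rxq_buffers(rxq);
		return err;
	}

	return 0;
}

If the plan is different, feel free to ignore the above.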