Message-ID:
<PAXPR04MB8510D091184E33C4CD91A2D48889A@PAXPR04MB8510.eurprd04.prod.outlook.com>
Date: Tue, 20 Jan 2026 06:39:56 +0000
From: Wei Fang <wei.fang@....com>
To: Frank Li <frank.li@....com>
CC: Shenwei Wang <shenwei.wang@....com>, Clark Wang <xiaoning.wang@....com>,
"andrew+netdev@...n.ch" <andrew+netdev@...n.ch>, "davem@...emloft.net"
<davem@...emloft.net>, "edumazet@...gle.com" <edumazet@...gle.com>,
"kuba@...nel.org" <kuba@...nel.org>, "pabeni@...hat.com" <pabeni@...hat.com>,
"ast@...nel.org" <ast@...nel.org>, "daniel@...earbox.net"
<daniel@...earbox.net>, "hawk@...nel.org" <hawk@...nel.org>,
"john.fastabend@...il.com" <john.fastabend@...il.com>, "sdf@...ichev.me"
<sdf@...ichev.me>, "netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"imx@...ts.linux.dev" <imx@...ts.linux.dev>, "bpf@...r.kernel.org"
<bpf@...r.kernel.org>
Subject: RE: [PATCH v2 net-next 11/14] net: fec: move xdp_rxq_info* APIs out
of fec_enet_create_page_pool()
> On Fri, Jan 16, 2026 at 03:40:24PM +0800, Wei Fang wrote:
> > Extract the xdp_rxq_info* registration from fec_enet_create_page_pool()
> > into a new helper, fec_xdp_rxq_info_reg(), and move the call out of
> > fec_enet_create_page_pool(), so that it can be reused in subsequent
> > patches to support XDP zero-copy mode.
> >
> > Signed-off-by: Wei Fang <wei.fang@....com>
> > ---
> > drivers/net/ethernet/freescale/fec_main.c | 58 ++++++++++++++++-------
> > 1 file changed, 40 insertions(+), 18 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
> > index c1786ccf0443..a418f0153d43 100644
> > --- a/drivers/net/ethernet/freescale/fec_main.c
> > +++ b/drivers/net/ethernet/freescale/fec_main.c
> > @@ -489,23 +489,7 @@ fec_enet_create_page_pool(struct fec_enet_private *fep,
> >  		return err;
> >  	}
> >  
> > -	err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
> > -	if (err < 0)
> > -		goto err_free_pp;
> > -
> > -	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
> > -					 rxq->page_pool);
> > -	if (err)
> > -		goto err_unregister_rxq;
> > -
> >  	return 0;
> > -
> > -err_unregister_rxq:
> > -	xdp_rxq_info_unreg(&rxq->xdp_rxq);
> > -err_free_pp:
> > -	page_pool_destroy(rxq->page_pool);
> > -	rxq->page_pool = NULL;
> > -	return err;
>
> Normally this patch should put the helper fec_xdp_rxq_info_reg() before
> fec_enet_create_page_pool(), then call fec_xdp_rxq_info_reg() here.
The main purpose of this patch is to move the xdp-related logic out of
fec_enet_create_page_pool(), so there is no need to split such a trivial
change across two patches.
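
(For illustration, the structure Frank suggests would look roughly like
the sketch below: the helper defined above fec_enet_create_page_pool()
and called from it, leaving the move to the RX buffer path for a later
patch. Bodies are elided; this is a sketch of the shape, not code from
the series.)

static int fec_xdp_rxq_info_reg(struct fec_enet_private *fep,
				struct fec_enet_priv_rx_q *rxq)
{
	/* xdp_rxq_info_reg() + xdp_rxq_info_reg_mem_model(), with
	 * xdp_rxq_info_unreg() on the mem-model error path, as in the
	 * hunk quoted above.
	 */
	return 0;
}

static int fec_enet_create_page_pool(struct fec_enet_private *fep,
				     struct fec_enet_priv_rx_q *rxq, int size)
{
	int err;

	/* ... page_pool_create() and its error handling, as before ... */

	err = fec_xdp_rxq_info_reg(fep, rxq);
	if (err)
		goto err_free_pp;

	return 0;

err_free_pp:
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;
	return err;
}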
>
> >  }
> >  
> >  static void fec_txq_trigger_xmit(struct fec_enet_private *fep,
> > @@ -3419,6 +3403,38 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
> >  	.self_test = net_selftest,
> >  };
> >
> > +static int fec_xdp_rxq_info_reg(struct fec_enet_private *fep,
> > +				struct fec_enet_priv_rx_q *rxq)
> > +{
> > +	struct net_device *ndev = fep->netdev;
> > +	int err;
> > +
> > +	err = xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq->id, 0);
> > +	if (err) {
> > +		netdev_err(ndev, "Failed to register xdp rxq info\n");
> > +		return err;
> > +	}
> > +
> > +	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
> > +					 rxq->page_pool);
> > +	if (err) {
> > +		netdev_err(ndev, "Failed to register XDP mem model\n");
> > +		xdp_rxq_info_unreg(&rxq->xdp_rxq);
> > +
> > +		return err;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +static void fec_xdp_rxq_info_unreg(struct fec_enet_priv_rx_q *rxq)
> > +{
> > +	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) {
> > +		xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq);
> > +		xdp_rxq_info_unreg(&rxq->xdp_rxq);
> > +	}
> > +}
> > +
> >  static void fec_enet_free_buffers(struct net_device *ndev)
> >  {
> >  	struct fec_enet_private *fep = netdev_priv(ndev);
> > @@ -3430,6 +3446,9 @@ static void fec_enet_free_buffers(struct net_device *ndev)
> >  
> >  	for (q = 0; q < fep->num_rx_queues; q++) {
> >  		rxq = fep->rx_queue[q];
> > +
> > +		fec_xdp_rxq_info_unreg(rxq);
> > +
> >  		for (i = 0; i < rxq->bd.ring_size; i++)
> >  			page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
> >  						false);
> > @@ -3437,8 +3456,6 @@ static void fec_enet_free_buffers(struct net_device *ndev)
> >  		for (i = 0; i < XDP_STATS_TOTAL; i++)
> >  			rxq->stats[i] = 0;
> >  
> > -		if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
> > -			xdp_rxq_info_unreg(&rxq->xdp_rxq);
>
> Why put fec_xdp_rxq_info_unreg() here instead of making it an exact
> in-place replacement?
>
It would be fine to put it in the original position; the only reason is
that I want the teardown order to be the reverse of the setup order in
fec_enet_alloc_rxq_buffers().
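
(To illustrate: after this series the two paths mirror each other,
roughly as sketched below; simplified, not the literal code.)

	/* fec_enet_alloc_rxq_buffers(): buffers first, XDP registration
	 * last.
	 */
	/* ... fill the RX ring from rxq->page_pool ... */
	err = fec_xdp_rxq_info_reg(fep, rxq);
	if (err)
		goto err_alloc;

	/* fec_enet_free_buffers(): the reverse, XDP unregistration first. */
	fec_xdp_rxq_info_unreg(rxq);
	/* ... page_pool_put_full_page() per buffer, then
	 * page_pool_destroy(rxq->page_pool) ...
	 */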
> Frank
> >  		page_pool_destroy(rxq->page_pool);
> >  		rxq->page_pool = NULL;
> >  	}
> > @@ -3593,6 +3610,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
> >  	/* Set the last buffer to wrap. */
> >  	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
> >  	bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
> > +
> > +	err = fec_xdp_rxq_info_reg(fep, rxq);
> > +	if (err)
> > +		goto err_alloc;
> > +
> >  	return 0;
> >  
> >  err_alloc:
> > --
> > 2.34.1
> >
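
(A note on the error path in the last hunk: fec_xdp_rxq_info_reg()
cleans up after itself on failure, so nothing is left registered when it
returns an error. Assuming err_alloc still unwinds through
fec_enet_free_buffers(), as in the current driver, the
xdp_rxq_info_is_reg() check in fec_xdp_rxq_info_unreg() then makes the
unregister a no-op for the queue that failed, while still cleaning up
queues registered earlier. A simplified sketch, under that assumption:)

	err = fec_xdp_rxq_info_reg(fep, rxq);
	if (err)
		goto err_alloc;

	return 0;

err_alloc:
	/* Walks all RX queues; fec_xdp_rxq_info_unreg() is a no-op for
	 * any queue that never registered, thanks to the
	 * xdp_rxq_info_is_reg() check.
	 */
	fec_enet_free_buffers(ndev);
	return -ENOMEM;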