Message-ID:
 <PAXPR04MB85104C04A6B18F1DBB137ED6888AA@PAXPR04MB8510.eurprd04.prod.outlook.com>
Date: Sat, 17 Jan 2026 02:32:27 +0000
From: Wei Fang <wei.fang@....com>
To: Frank Li <frank.li@....com>
CC: Shenwei Wang <shenwei.wang@....com>, Clark Wang <xiaoning.wang@....com>,
	"andrew+netdev@...n.ch" <andrew+netdev@...n.ch>, "davem@...emloft.net"
	<davem@...emloft.net>, "edumazet@...gle.com" <edumazet@...gle.com>,
	"kuba@...nel.org" <kuba@...nel.org>, "pabeni@...hat.com" <pabeni@...hat.com>,
	"ast@...nel.org" <ast@...nel.org>, "daniel@...earbox.net"
	<daniel@...earbox.net>, "hawk@...nel.org" <hawk@...nel.org>,
	"john.fastabend@...il.com" <john.fastabend@...il.com>, "sdf@...ichev.me"
	<sdf@...ichev.me>, "netdev@...r.kernel.org" <netdev@...r.kernel.org>,
	"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
	"imx@...ts.linux.dev" <imx@...ts.linux.dev>, "bpf@...r.kernel.org"
	<bpf@...r.kernel.org>
Subject: RE: [PATCH v2 net-next 06/14] net: fec: add fec_enet_rx_queue_xdp()
 for XDP path

> > +static int fec_enet_rx_queue_xdp(struct fec_enet_private *fep, int queue,
> > +				 int budget, struct bpf_prog *prog)
> > +{
> > +	u32 data_start = FEC_ENET_XDP_HEADROOM + fep->rx_shift;
> > +	struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
> > +	struct net_device *ndev = fep->netdev;
> > +	struct bufdesc *bdp = rxq->bd.cur;
> > +	u32 sub_len = 4 + fep->rx_shift;
> > +	int cpu = smp_processor_id();
> > +	int pkt_received = 0;
> > +	struct sk_buff *skb;
> > +	u16 status, pkt_len;
> > +	struct xdp_buff xdp;
> > +	int tx_qid = queue;
> > +	struct page *page;
> > +	u32 xdp_res = 0;
> > +	dma_addr_t dma;
> > +	int index, err;
> > +	u32 act, sync;
> > +
> > +#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
> > +	/*
> > +	 * Hacky flush of all caches instead of using the DMA API for the TSO
> > +	 * headers.
> > +	 */
> > +	flush_cache_all();
> > +#endif
> > +
> > +	if (unlikely(queue >= fep->num_tx_queues))
> > +		tx_qid = fec_enet_xdp_get_tx_queue(fep, cpu);
> > +
> > +	xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
> > +
> > +	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
> > +		if (pkt_received >= budget)
> > +			break;
> > +		pkt_received++;
> > +
> > +		writel(FEC_ENET_RXF_GET(queue), fep->hwp + FEC_IEVENT);
> > +
> > +		/* Check for errors. */
> > +		status ^= BD_ENET_RX_LAST;
> > +		if (unlikely(fec_rx_error_check(ndev, status)))
> > +			goto rx_processing_done;
> > +
> > +		/* Process the incoming frame. */
> > +		ndev->stats.rx_packets++;
> > +		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
> > +		ndev->stats.rx_bytes += pkt_len - fep->rx_shift;
> > +
> > +		index = fec_enet_get_bd_index(bdp, &rxq->bd);
> > +		page = rxq->rx_buf[index];
> > +		dma = fec32_to_cpu(bdp->cbd_bufaddr);
> > +
> > +		if (fec_enet_update_cbd(rxq, bdp, index)) {
> > +			ndev->stats.rx_dropped++;
> > +			goto rx_processing_done;
> > +		}
> > +
> > +		dma_sync_single_for_cpu(&fep->pdev->dev, dma, pkt_len,
> > +					DMA_FROM_DEVICE);
> > +		prefetch(page_address(page));
> > +
> > +		xdp_buff_clear_frags_flag(&xdp);
> > +		/* subtract 16bit shift and FCS */
> > +		pkt_len -= sub_len;
> > +		xdp_prepare_buff(&xdp, page_address(page), data_start,
> > +				 pkt_len, false);
> > +
> > +		act = bpf_prog_run_xdp(prog, &xdp);
> > +		/* Due to xdp_adjust_tail and xdp_adjust_head, the DMA sync
> > +		 * for_device must cover the max length the CPU touched.
> > +		 */
> > +		sync = xdp.data_end - xdp.data;
> > +		sync = max(sync, pkt_len);
> > +
> > +		switch (act) {
> > +		case XDP_PASS:
> > +			rxq->stats[RX_XDP_PASS]++;
> > +			/* The packet length includes FCS, but we don't want to
> > +			 * include that when passing upstream as it messes up
> > +			 * bridging applications.
> > +			 */
> > +			skb = fec_build_skb(fep, rxq, bdp, page, pkt_len);
> > +			if (!skb) {
> > +				fec_xdp_drop(rxq, &xdp, sync);
> > +				trace_xdp_exception(ndev, prog, XDP_PASS);
> > +			} else {
> > +				napi_gro_receive(&fep->napi, skb);
> > +			}
> > +			break;
> > +		case XDP_REDIRECT:
> > +			rxq->stats[RX_XDP_REDIRECT]++;
> > +			err = xdp_do_redirect(ndev, &xdp, prog);
> > +			if (unlikely(err)) {
> > +				fec_xdp_drop(rxq, &xdp, sync);
> > +				trace_xdp_exception(ndev, prog, XDP_REDIRECT);
> > +			} else {
> > +				xdp_res |= FEC_ENET_XDP_REDIR;
> > +			}
> > +			break;
> > +		case XDP_TX:
> > +			rxq->stats[RX_XDP_TX]++;
> > +			err = fec_enet_xdp_tx_xmit(fep, cpu, &xdp, sync, tx_qid);
> > +			if (unlikely(err)) {
> > +				rxq->stats[RX_XDP_TX_ERRORS]++;
> > +				fec_xdp_drop(rxq, &xdp, sync);
> > +				trace_xdp_exception(ndev, prog, XDP_TX);
> > +			}
> > +			break;
> > +		default:
> > +			bpf_warn_invalid_xdp_action(ndev, prog, act);
> > +			fallthrough;
> > +		case XDP_ABORTED:
> > +			/* handle aborts by dropping packet */
> > +			fallthrough;
> > +		case XDP_DROP:
> > +			rxq->stats[RX_XDP_DROP]++;
> > +			fec_xdp_drop(rxq, &xdp, sync);
> > +			break;
> > +		}
> > +
> > +rx_processing_done:
> > +		/* Clear the status flags for this buffer */
> > +		status &= ~BD_ENET_RX_STATS;
> > +		/* Mark the buffer empty */
> > +		status |= BD_ENET_RX_EMPTY;
> > +
> > +		if (fep->bufdesc_ex) {
> > +			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
> > +
> > +			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
> > +			ebdp->cbd_prot = 0;
> > +			ebdp->cbd_bdu = 0;
> > +		}
> > +
> > +		/* Make sure the updates to rest of the descriptor are
> > +		 * performed before transferring ownership.
> > +		 */
> > +		dma_wmb();
> > +		bdp->cbd_sc = cpu_to_fec16(status);
> > +
> > +		/* Update BD pointer to next entry */
> > +		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
> > +
> > +		/* Doing this here will keep the FEC running while we process
> > +		 * incoming frames. On a heavily loaded network, we should be
> > +		 * able to keep up at the expense of system resources.
> > +		 */
> > +		writel(0, rxq->bd.reg_desc_active);
> > +	}
> > +
> > +	rxq->bd.cur = bdp;
> > +
> > +	if (xdp_res & FEC_ENET_XDP_REDIR)
> >  		xdp_do_flush();
> >
> >  	return pkt_received;
> > @@ -1970,11 +2061,17 @@ static int fec_enet_rx_queue(struct fec_enet_private *fep,
> >  static int fec_enet_rx(struct net_device *ndev, int budget)
> >  {
> >  	struct fec_enet_private *fep = netdev_priv(ndev);
> > +	struct bpf_prog *prog = READ_ONCE(fep->xdp_prog);
> >  	int i, done = 0;
> >
> >  	/* Make sure that AVB queues are processed first. */
> > -	for (i = fep->num_rx_queues - 1; i >= 0; i--)
> > -		done += fec_enet_rx_queue(fep, i, budget - done);
> > +	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
> > +		if (prog)
> > +			done += fec_enet_rx_queue_xdp(fep, i, budget - done,
> > +						      prog);
> 
> The patch is still hard to review. It may be simpler if:
> 1. a new patch first copies fec_enet_rx_queue() to fec_enet_rx_queue_xdp().
> 2. the changes in this patch would then be small, based on 1.
> 

fec_enet_rx_queue_xdp() is basically the same as fec_enet_rx_queue();
the biggest difference is the removal of the fec_enet_run_xdp() helper,
whose code has been relocated into fec_enet_rx_queue_xdp() (see the
sketch of the resulting dispatch below).

The current patch set already has 14 patches, which is close to the
15-patch limit. I would like to reserve the last slot for a new patch
addressing the comment below, because that change does differ somewhat
from the previous logic.
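
For readability, here is the resulting dispatch in fec_enet_rx() in one
piece, reconstructed from the hunk quoted above and continued just below
(a review sketch, not a verbatim copy of the file; all names come from
the quoted hunk):

static int fec_enet_rx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bpf_prog *prog = READ_ONCE(fep->xdp_prog);
	int i, done = 0;

	/* Make sure that AVB queues are processed first. */
	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
		/* With a program attached, take the XDP path, where the
		 * verdict handling formerly in fec_enet_run_xdp() now
		 * lives inline.
		 */
		if (prog)
			done += fec_enet_rx_queue_xdp(fep, i, budget - done,
						      prog);
		else
			done += fec_enet_rx_queue(fep, i, budget - done);
	}

	return done;
}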

> > +		else
> > +			done += fec_enet_rx_queue(fep, i, budget - done);
> > +	}
> >
> >  	return done;
> >  }
> > @@ -3854,15 +3951,6 @@ static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
> >  	}
> >  }
> >
> > -static int
> > -fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
> > -{
> > -	if (unlikely(index < 0))
> > -		return 0;
> > -
> > -	return (index % fep->num_tx_queues);
> > -}
> > -
> >  static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
> >  				   struct fec_enet_priv_tx_q *txq,
> >  				   void *frame, u32 dma_sync_len,
> > @@ -3956,15 +4044,11 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
> >
> >  static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
> >  				int cpu, struct xdp_buff *xdp,
> > -				u32 dma_sync_len)
> > +				u32 dma_sync_len, int queue)
> 
> You could split that out into a new patch that just adds the queue id parameter to fec_enet_xdp_tx_xmit().
> 

Yes, this is a new change compared to the previous logic; I will add a
new patch for it (roughly along the lines sketched below).
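
For illustration only, that standalone patch could end up looking
roughly like the sketch below. The function body shown here is my
assumption of the current helper (it is not quoted from the driver);
the only point of the sketch is the new "queue" parameter replacing the
internal queue lookup:

/* Sketch only: fec_enet_xdp_tx_xmit() takes the TX queue id from the
 * caller instead of deriving it from the CPU id internally.
 */
static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
				int cpu, struct xdp_buff *xdp,
				u32 dma_sync_len, int queue)
{
	struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue];
	struct netdev_queue *nq = netdev_get_tx_queue(fep->netdev, queue);
	int ret;

	__netif_tx_lock(nq, cpu);
	/* Avoid tx timeout as XDP shares the queue with kernel stack */
	txq_trans_cond_update(nq);
	ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
	__netif_tx_unlock(nq);

	return ret;
}

The XDP_TX caller would then keep computing the value it already has,
e.g. tx_qid = fec_enet_xdp_get_tx_queue(fep, cpu), and pass it in as in
the RX hunk quoted earlier.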

