Message-ID: <20250908195748.1707057-13-anthony.l.nguyen@intel.com>
Date: Mon, 8 Sep 2025 12:57:42 -0700
From: Tony Nguyen <anthony.l.nguyen@...el.com>
To: davem@...emloft.net,
kuba@...nel.org,
pabeni@...hat.com,
edumazet@...gle.com,
andrew+netdev@...n.ch,
netdev@...r.kernel.org
Cc: Alexander Lobakin <aleksander.lobakin@...el.com>,
anthony.l.nguyen@...el.com,
michal.kubiak@...el.com,
maciej.fijalkowski@...el.com,
magnus.karlsson@...el.com,
przemyslaw.kitszel@...el.com,
ast@...nel.org,
daniel@...earbox.net,
hawk@...nel.org,
john.fastabend@...il.com,
horms@...nel.org,
sdf@...ichev.me,
nxne.cnse.osdt.itp.upstreaming@...el.com,
bpf@...r.kernel.org,
Ramu R <ramu.r@...el.com>
Subject: [PATCH net-next 12/13] idpf: add support for .ndo_xdp_xmit()
From: Alexander Lobakin <aleksander.lobakin@...el.com>
Use the libeth XDP infra to implement .ndo_xdp_xmit() in idpf.

The Tx callbacks are reused from the XDP_TX code. The XDP redirect
target feature is set/cleared depending on whether an XDP program is
loaded, since for now we still don't allocate XDP Tx queues when no
program is attached.
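
For illustration only (not part of this patch): once the redirect
target feature is advertised, a stock devmap-redirect XDP program is
enough to exercise .ndo_xdp_xmit() on the idpf netdev. A minimal
sketch, with hypothetical map/program names, assuming userspace has
stored the idpf interface's ifindex at key 0 of the devmap:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  /* Hypothetical devmap; userspace stores the idpf ifindex at key 0. */
  struct {
  	__uint(type, BPF_MAP_TYPE_DEVMAP);
  	__uint(max_entries, 1);
  	__type(key, __u32);
  	__type(value, __u32);
  } xmit_map SEC(".maps");

  SEC("xdp")
  int redirect_to_idpf(struct xdp_md *ctx)
  {
  	/* Redirected frames are batched by the core and handed to the
  	 * target device's .ndo_xdp_xmit(); XDP_DROP is returned if the
  	 * map lookup fails.
  	 */
  	return bpf_redirect_map(&xmit_map, 0, XDP_DROP);
  }

  char LICENSE[] SEC("license") = "GPL";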
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@...el.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@...el.com>
Tested-by: Ramu R <ramu.r@...el.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@...el.com>
---
drivers/net/ethernet/intel/idpf/idpf_lib.c | 1 +
drivers/net/ethernet/intel/idpf/xdp.c | 20 ++++++++++++++++++++
drivers/net/ethernet/intel/idpf/xdp.h | 2 ++
3 files changed, 23 insertions(+)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 3bc719c9106f..0559f1da88a9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -2617,4 +2617,5 @@ static const struct net_device_ops idpf_netdev_ops = {
 	.ndo_hwtstamp_get = idpf_hwtstamp_get,
 	.ndo_hwtstamp_set = idpf_hwtstamp_set,
 	.ndo_bpf = idpf_xdp,
+	.ndo_xdp_xmit = idpf_xdp_xmit,
 };
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
index e6b45df95cd3..b6a8304d61f9 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.c
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -322,8 +322,26 @@ LIBETH_XDP_DEFINE_START();
 LIBETH_XDP_DEFINE_TIMER(static idpf_xdp_tx_timer, idpf_xdpsq_complete);
 LIBETH_XDP_DEFINE_FLUSH_TX(idpf_xdp_tx_flush_bulk, idpf_xdp_tx_prep,
 			   idpf_xdp_tx_xmit);
+LIBETH_XDP_DEFINE_FLUSH_XMIT(static idpf_xdp_xmit_flush_bulk, idpf_xdp_tx_prep,
+			     idpf_xdp_tx_xmit);
 LIBETH_XDP_DEFINE_END();
 
+int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+		  u32 flags)
+{
+	const struct idpf_netdev_priv *np = netdev_priv(dev);
+	const struct idpf_vport *vport = np->vport;
+
+	if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
+		return -ENETDOWN;
+
+	return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
+				       &vport->txqs[vport->xdp_txq_offset],
+				       vport->num_xdp_txq,
+				       idpf_xdp_xmit_flush_bulk,
+				       idpf_xdp_tx_finalize);
+}
+
 void idpf_xdp_set_features(const struct idpf_vport *vport)
 {
 	if (!idpf_is_queue_model_split(vport->rxq_model))
@@ -378,6 +396,8 @@ static int idpf_xdp_setup_prog(struct idpf_vport *vport,
 	if (old)
 		bpf_prog_put(old);
 
+	libeth_xdp_set_redirect(vport->netdev, vport->xdp_prog);
+
 	return ret;
 }
 
diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
index 986156162e2d..db8ecc1843fe 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.h
+++ b/drivers/net/ethernet/intel/idpf/xdp.h
@@ -102,5 +102,7 @@ static inline void idpf_xdp_tx_finalize(void *_xdpsq, bool sent, bool flush)
 
 void idpf_xdp_set_features(const struct idpf_vport *vport);
 int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+		  u32 flags);
 
 #endif /* _IDPF_XDP_H_ */
--
2.47.1