Message-ID: <20250228012534.3460918-3-kuba@kernel.org>
Date: Thu, 27 Feb 2025 17:25:27 -0800
From: Jakub Kicinski <kuba@...nel.org>
To: davem@...emloft.net
Cc: netdev@...r.kernel.org,
edumazet@...gle.com,
pabeni@...hat.com,
andrew+netdev@...n.ch,
horms@...nel.org,
michael.chan@...adcom.com,
pavan.chebbi@...adcom.com,
przemyslaw.kitszel@...el.com,
Jakub Kicinski <kuba@...nel.org>
Subject: [PATCH net-next v2 2/9] eth: bnxt: don't run xdp programs on fallback traffic

The XDP program attached to the PF should not be executed
on fallback traffic. Pass the desired dev to bnxt_rx_xdp()
and skip running the program if the packet is for a
representor. bnxt_rx_xdp()
has a lot of arguments already, so presumably adding one
more is okay.
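
To make the flow concrete, this is roughly what bnxt_rx_pkt()
ends up doing after this patch (condensed from the bnxt.c hunk
below; unrelated lines omitted):

	/* The CFA code maps fallback traffic to its VF representor;
	 * everything else stays on the PF.
	 */
	dev = bp->dev;
	if (cmp_type == CMP_TYPE_RX_L2_CMP)
		dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));

	if (xdp_active) {
		if (bnxt_rx_xdp(bp, dev, rxr, cons, &xdp, data, &data_ptr,
				&len, event)) {
			rc = 1;
			goto next_rx;
		}
	}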

Compile tested only.

Well-behaved drivers (nfp) do not execute XDP on fallback
traffic, but perhaps this is a matter of opinion rather than
a hard rule, so I'm not considering this a fix.

Signed-off-by: Jakub Kicinski <kuba@...nel.org>
---
v2:
- pass dev to bnxt_rx_xdp(), and skip just the BPF execution,
to avoid unintentionally skipping the Tx ring handling
(see the condensed sketch below)
v1: https://lore.kernel.org/20250226211003.2790916-3-kuba@kernel.org
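
Condensed sketch of the resulting bnxt_rx_xdp() behaviour (taken
from the bnxt_xdp.c hunk below): the function is still entered
for representor traffic, only the program invocation is bypassed,
so the Tx ring accounting that follows keeps running:

	orig_data = xdp->data;

	if (bp->dev == dev)
		act = bpf_prog_run_xdp(xdp_prog, xdp);
	else /* packet is for a VF representor */
		act = XDP_PASS;

	/* runs for both PF and representor traffic */
	tx_avail = bnxt_tx_avail(bp, txr);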
---
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 3 ++-
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 11 +++++++----
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 8 ++++++--
3 files changed, 15 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 0122782400b8..752b6cf0022c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -17,7 +17,8 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
dma_addr_t mapping, u32 len,
struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget);
-bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+bool bnxt_rx_xdp(struct bnxt *bp, struct net_device *dev,
+ struct bnxt_rx_ring_info *rxr, u16 cons,
struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f6a26f6f85bb..94bc9121d3f9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2036,7 +2036,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- struct net_device *dev = bp->dev;
+ struct net_device *dev;
struct rx_cmp *rxcmp;
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
@@ -2159,6 +2159,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
len = flags >> RX_CMP_LEN_SHIFT;
dma_addr = rx_buf->mapping;

+ dev = bp->dev;
+ if (cmp_type == CMP_TYPE_RX_L2_CMP)
+ dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
+
if (bnxt_xdp_attached(bp, rxr)) {
bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
if (agg_bufs) {
@@ -2172,7 +2176,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}

if (xdp_active) {
- if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
+ if (bnxt_rx_xdp(bp, dev, rxr, cons, &xdp, data, &data_ptr, &len,
+ event)) {
rc = 1;
goto next_rx;
}
@@ -2239,8 +2244,6 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
}

- if (cmp_type == CMP_TYPE_RX_L2_CMP)
- dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
skb->protocol = eth_type_trans(skb, dev);

if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index e6c64e4bd66c..aba49ddb0e66 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -221,7 +221,8 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
* true - packet consumed by XDP and new buffer is allocated.
* false - packet should be passed to the stack.
*/
-bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+bool bnxt_rx_xdp(struct bnxt *bp, struct net_device *dev,
+ struct bnxt_rx_ring_info *rxr, u16 cons,
struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event)
{
@@ -246,7 +247,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
orig_data = xdp->data;

- act = bpf_prog_run_xdp(xdp_prog, xdp);
+ if (bp->dev == dev)
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ else /* packet is for a VF representor */
+ act = XDP_PASS;

tx_avail = bnxt_tx_avail(bp, txr);
/* If the tx ring is not full, we must not update the rx producer yet
--
2.48.1