Message-Id: <20250305-afabre-traits-010-rfc2-v1-14-d0ecfb869797@cloudflare.com>
Date: Wed, 05 Mar 2025 15:32:11 +0100
From: arthur@...hurfabre.com
To: netdev@...r.kernel.org, bpf@...r.kernel.org
Cc: jakub@...udflare.com, hawk@...nel.org, yan@...udflare.com,
jbrandeburg@...udflare.com, thoiland@...hat.com, lbiancon@...hat.com,
Arthur Fabre <afabre@...udflare.com>
Subject: [PATCH RFC bpf-next 14/20] mlx5: Propagate trait presence to skb
From: Arthur Fabre <afabre@...udflare.com>
Call the common xdp_buff_update_skb() helper when building an skb from the
XDP buffer, so that trait presence is propagated from the xdp_buff to the
skb.
Signed-off-by: Arthur Fabre <afabre@...udflare.com>
---
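For context, a minimal sketch of the call pattern applied in each skb-build
path below. Only the helper signature visible in this diff is assumed;
xdp_buff_update_skb() itself is introduced earlier in this series, and the
surrounding function is hypothetical, for illustration only:

    /* Illustrative driver helper, not part of this patch. */
    static struct sk_buff *example_build_skb(void *va, u32 frag_size,
                                             struct xdp_buff *xdp)
    {
            struct sk_buff *skb = napi_build_skb(va, frag_size);

            if (unlikely(!skb))
                    return NULL;

            /* Propagate trait presence from the xdp_buff to the new skb. */
            xdp_buff_update_skb(xdp, skb);

            return skb;
    }
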
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 77bace3b212ae18c420a11312a5e3043b5e3f4ae..4ced9109a8f2a047992ab96fa533ad2a7283bb91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1632,7 +1632,8 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
u32 frag_size, u16 headroom,
- u32 cqe_bcnt, u32 metasize)
+ u32 cqe_bcnt, u32 metasize,
+ struct mlx5e_xdp_buff *mxbuf)
{
struct sk_buff *skb = napi_build_skb(va, frag_size);
@@ -1646,6 +1647,8 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
if (metasize)
skb_metadata_set(skb, metasize);
+ if (mxbuf)
+ xdp_buff_update_skb(&mxbuf->xdp, skb);
return skb;
}
@@ -1696,7 +1699,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
+ skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize, mxbuf);
if (unlikely(!skb))
return NULL;
@@ -1772,7 +1775,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
skb = mlx5e_build_linear_skb(rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz,
mxbuf->xdp.data - mxbuf->xdp.data_hard_start,
mxbuf->xdp.data_end - mxbuf->xdp.data,
- mxbuf->xdp.data - mxbuf->xdp.data_meta);
+ mxbuf->xdp.data - mxbuf->xdp.data_meta, mxbuf);
if (unlikely(!skb))
return NULL;
@@ -2071,7 +2074,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
skb = mlx5e_build_linear_skb(rq, mxbuf->xdp.data_hard_start,
linear_frame_sz,
mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
- mxbuf->xdp.data - mxbuf->xdp.data_meta);
+ mxbuf->xdp.data - mxbuf->xdp.data_meta, mxbuf);
if (unlikely(!skb)) {
mlx5e_page_release_fragmented(rq, &wi->linear_page);
return NULL;
@@ -2168,7 +2171,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
+ skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize, mxbuf);
if (unlikely(!skb))
return NULL;
@@ -2202,7 +2205,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir);
net_prefetchw(hdr);
net_prefetch(data);
- skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
+ skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0, NULL);
if (unlikely(!skb))
return NULL;
--
2.43.0