Message-Id: <20250828-cpaasch-pf-927-netmlx5-avoid-copying-the-payload-to-the-malloced-area-v4-1-bfcd5033a77c@openai.com>
Date: Thu, 28 Aug 2025 20:36:18 -0700
From: Christoph Paasch via B4 Relay <devnull+cpaasch.openai.com@...nel.org>
To: Gal Pressman <gal@...dia.com>, Dragos Tatulea <dtatulea@...dia.com>,
Saeed Mahameed <saeedm@...dia.com>, Tariq Toukan <tariqt@...dia.com>,
Mark Bloch <mbloch@...dia.com>, Leon Romanovsky <leon@...nel.org>,
Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Alexei Starovoitov <ast@...nel.org>, Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Stanislav Fomichev <sdf@...ichev.me>
Cc: netdev@...r.kernel.org, linux-rdma@...r.kernel.org, bpf@...r.kernel.org,
Christoph Paasch <cpaasch@...nai.com>
Subject: [PATCH net-next v4 1/2] net/mlx5: DMA-sync earlier in
mlx5e_skb_from_cqe_mpwrq_nonlinear
From: Christoph Paasch <cpaasch@...nai.com>
Calling dma_sync_single_for_cpu() earlier will allow us to adjust
headlen based on the actual size of the protocol headers.
Syncing earlier also means that we no longer need to call
mlx5e_copy_skb_header() and can instead call
skb_copy_to_linear_data() directly.
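For context, here is a condensed sketch of the resulting order of
operations. It is illustrative only, not the driver's actual control
flow: the helper name and its reduced argument list are invented for
this example, while the types, fields and calls are the ones already
used in en_rx.c, so the snippet only builds inside that file.

  /* Hypothetical, simplified helper: sync the head fragment for CPU
   * access early, then copy the packet headers straight into the skb
   * linear area.
   */
  static struct sk_buff *
  mlx5e_build_header_skb_sketch(struct mlx5e_rq *rq,
                                struct mlx5e_frag_page *head_page,
                                u32 head_offset, u16 headlen)
  {
          dma_addr_t addr = page_pool_get_dma_addr_netmem(head_page->netmem);
          void *head_addr = netmem_address(head_page->netmem) + head_offset;
          struct sk_buff *skb;

          skb = napi_alloc_skb(rq->cq.napi,
                               ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
          if (unlikely(!skb))
                  return NULL;

          /* Make the device-written header bytes visible to the CPU
           * before they are read.
           */
          dma_sync_single_for_cpu(rq->pdev, addr + head_offset, headlen,
                                  rq->buff.map_dir);

          /* The buffer is already synced, so a plain copy into the
           * linear area is enough; mlx5e_copy_skb_header(), which did
           * the sync itself, is no longer needed.
           */
          skb_copy_to_linear_data(skb, head_addr, headlen);

          /* skb linear part was allocated with headlen and aligned to long */
          skb->tail += headlen;
          skb->len  += headlen;

          return skb;
  }

The diff below makes this change inside
mlx5e_skb_from_cqe_mpwrq_nonlinear() itself rather than in a separate
helper.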
Signed-off-by: Christoph Paasch <cpaasch@...nai.com>
---
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 19 ++++++++++++-------
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index b8c609d91d11bd315e8fb67f794a91bd37cd28c0..8bedbda522808cbabc8e62ae91a8c25d66725ebb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -2005,17 +2005,19 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
struct skb_shared_info *sinfo;
unsigned int truesize = 0;
struct bpf_prog *prog;
+ void *va, *head_addr;
struct sk_buff *skb;
u32 linear_frame_sz;
u16 linear_data_len;
u16 linear_hr;
- void *va;
prog = rcu_dereference(rq->xdp_prog);
+ head_addr = netmem_address(head_page->netmem) + head_offset;
+
if (prog) {
/* area for bpf_xdp_[store|load]_bytes */
- net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
+ net_prefetchw(head_addr);
if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
&wi->linear_page))) {
rq->stats->buff_alloc_err++;
@@ -2028,6 +2030,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
linear_data_len = 0;
linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
} else {
+ dma_addr_t addr;
+
skb = napi_alloc_skb(rq->cq.napi,
ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
if (unlikely(!skb)) {
@@ -2039,6 +2043,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
net_prefetchw(va); /* xdp_frame data area */
net_prefetchw(skb->data);
+ addr = page_pool_get_dma_addr_netmem(head_page->netmem);
+ dma_sync_single_for_cpu(rq->pdev, addr + head_offset, headlen,
+ rq->buff.map_dir);
+
frag_offset += headlen;
byte_cnt -= headlen;
linear_hr = skb_headroom(skb);
@@ -2117,8 +2125,6 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
__pskb_pull_tail(skb, headlen);
} else {
- dma_addr_t addr;
-
if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
@@ -2133,9 +2139,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
while (++pagep < frag_page);
}
/* copy header */
- addr = page_pool_get_dma_addr_netmem(head_page->netmem);
- mlx5e_copy_skb_header(rq, skb, head_page->netmem, addr,
- head_offset, head_offset, headlen);
+ skb_copy_to_linear_data(skb, head_addr, headlen);
+
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
--
2.50.1