Message-ID: <20250923134742.1399800-6-maxtram95@gmail.com>
Date: Tue, 23 Sep 2025 16:47:30 +0300
From: Maxim Mikityanskiy <maxtram95@...il.com>
To: Daniel Borkmann <daniel@...earbox.net>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
David Ahern <dsahern@...nel.org>,
Nikolay Aleksandrov <razor@...ckwall.org>
Cc: netdev@...r.kernel.org,
tcpdump-workers@...ts.tcpdump.org,
Guy Harris <gharris@...ic.net>,
Michael Richardson <mcr@...delman.ca>,
Denis Ovsienko <denis@...ienko.info>,
Xin Long <lucien.xin@...il.com>,
Maxim Mikityanskiy <maxim@...valent.com>
Subject: [PATCH net-next 05/17] net/mlx5e: Remove jumbo_remove step from TX path
From: Maxim Mikityanskiy <maxim@...valent.com>
Now that the kernel no longer inserts a Hop-by-Hop (HBH) jumbogram
header for BIG TCP IPv6 packets, remove the now-unnecessary steps from
the mlx5e and mlx5i TX paths that used to detect and strip that header.
Signed-off-by: Maxim Mikityanskiy <maxim@...valent.com>
---
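Reviewer note (not for the commit message): for context on what the
removed code keyed on, oversized IPv6 GSO packets cannot express their
length in the 16-bit payload_len field, so the stack used to set
payload_len to 0 and carry the real length in an HBH jumbogram option,
which drivers like mlx5e then had to strip before inlining headers.
Below is a minimal, illustrative sketch; the struct layout is
reproduced in the comment from include/net/ipv6.h, but
skb_has_jumbo_hbh() is a hypothetical stand-in for the in-tree
ipv6_has_hopopt_jumbo() helper, which additionally validates lengths
and the oversize threshold.

#include <linux/skbuff.h>
#include <net/ipv6.h>	/* struct hop_jumbo_hdr, NEXTHDR_HOP */

/* struct hop_jumbo_hdr, as defined in include/net/ipv6.h:
 *
 *	struct hop_jumbo_hdr {
 *		u8	nexthdr;	   (protocol after the HBH header)
 *		u8	hdrlen;		   (0: option fits in these 8 bytes)
 *		u8	tlv_type;	   (IPV6_TLV_JUMBO)
 *		u8	tlv_len;	   (4)
 *		__be32	jumbo_payload_len; (real payload length, > 64K)
 *	};
 */
static bool skb_has_jumbo_hbh(const struct sk_buff *skb)
{
	const struct hop_jumbo_hdr *jhdr;
	const struct ipv6hdr *ip6;

	if (skb->protocol != htons(ETH_P_IPV6))
		return false;

	/* The jumbo option, if present, is the first extension header. */
	ip6 = ipv6_hdr(skb);
	if (ip6->nexthdr != NEXTHDR_HOP)
		return false;

	jhdr = (const struct hop_jumbo_hdr *)(ip6 + 1);
	return jhdr->tlv_type == IPV6_TLV_JUMBO;
}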
.../net/ethernet/mellanox/mlx5/core/en_tx.c | 75 +++----------------
1 file changed, 12 insertions(+), 63 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index b7227afcb51d..0b15e141567e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -152,12 +152,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
* to inline later in the transmit descriptor
*/
static inline u16
-mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
+mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
struct mlx5e_sq_stats *stats = sq->stats;
u16 ihs;
- *hopbyhop = 0;
if (skb->encapsulation) {
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
ihs = skb_inner_transport_offset(skb) +
@@ -167,17 +166,12 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
- } else {
+ else
ihs = skb_tcp_all_headers(skb);
- if (ipv6_has_hopopt_jumbo(skb)) {
- *hopbyhop = sizeof(struct hop_jumbo_hdr);
- ihs -= sizeof(struct hop_jumbo_hdr);
- }
- }
stats->tso_packets++;
- stats->tso_bytes += skb->len - ihs - *hopbyhop;
+ stats->tso_bytes += skb->len - ihs;
}
return ihs;
@@ -239,7 +233,6 @@ struct mlx5e_tx_attr {
__be16 mss;
u16 insz;
u8 opcode;
- u8 hopbyhop;
};
struct mlx5e_tx_wqe_attr {
@@ -275,16 +268,14 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_sq_stats *stats = sq->stats;
if (skb_is_gso(skb)) {
- int hopbyhop;
- u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb, &hopbyhop);
+ u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);
*attr = (struct mlx5e_tx_attr) {
.opcode = MLX5_OPCODE_LSO,
.mss = cpu_to_be16(skb_shinfo(skb)->gso_size),
.ihs = ihs,
.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
- .headlen = skb_headlen(skb) - ihs - hopbyhop,
- .hopbyhop = hopbyhop,
+ .headlen = skb_headlen(skb) - ihs,
};
stats->packets += skb_shinfo(skb)->gso_segs;
@@ -439,7 +430,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_data_seg *dseg;
struct mlx5e_tx_wqe_info *wi;
u16 ihs = attr->ihs;
- struct ipv6hdr *h6;
struct mlx5e_sq_stats *stats = sq->stats;
int num_dma;
@@ -456,28 +446,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (ihs) {
u8 *start = eseg->inline_hdr.start;
- if (unlikely(attr->hopbyhop)) {
- /* remove the HBH header.
- * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
- */
- if (skb_vlan_tag_present(skb)) {
- mlx5e_insert_vlan(start, skb, ETH_HLEN + sizeof(*h6));
- ihs += VLAN_HLEN;
- h6 = (struct ipv6hdr *)(start + sizeof(struct vlan_ethhdr));
- } else {
- unsafe_memcpy(start, skb->data,
- ETH_HLEN + sizeof(*h6),
- MLX5_UNSAFE_MEMCPY_DISCLAIMER);
- h6 = (struct ipv6hdr *)(start + ETH_HLEN);
- }
- h6->nexthdr = IPPROTO_TCP;
- /* Copy the TCP header after the IPv6 one */
- memcpy(h6 + 1,
- skb->data + ETH_HLEN + sizeof(*h6) +
- sizeof(struct hop_jumbo_hdr),
- tcp_hdrlen(skb));
- /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
- } else if (skb_vlan_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
mlx5e_insert_vlan(start, skb, ihs);
ihs += VLAN_HLEN;
stats->added_vlan_packets++;
@@ -491,7 +460,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
dseg += wqe_attr->ds_cnt_ids;
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs + attr->hopbyhop,
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
attr->headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
@@ -1014,34 +983,14 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->mss = attr.mss;
if (attr.ihs) {
- if (unlikely(attr.hopbyhop)) {
- struct ipv6hdr *h6;
-
- /* remove the HBH header.
- * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
- */
- unsafe_memcpy(eseg->inline_hdr.start, skb->data,
- ETH_HLEN + sizeof(*h6),
- MLX5_UNSAFE_MEMCPY_DISCLAIMER);
- h6 = (struct ipv6hdr *)((char *)eseg->inline_hdr.start + ETH_HLEN);
- h6->nexthdr = IPPROTO_TCP;
- /* Copy the TCP header after the IPv6 one */
- unsafe_memcpy(h6 + 1,
- skb->data + ETH_HLEN + sizeof(*h6) +
- sizeof(struct hop_jumbo_hdr),
- tcp_hdrlen(skb),
- MLX5_UNSAFE_MEMCPY_DISCLAIMER);
- /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
- } else {
- unsafe_memcpy(eseg->inline_hdr.start, skb->data,
- attr.ihs,
- MLX5_UNSAFE_MEMCPY_DISCLAIMER);
- }
+ unsafe_memcpy(eseg->inline_hdr.start, skb->data,
+ attr.ihs,
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
dseg += wqe_attr.ds_cnt_inl;
}
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs + attr.hopbyhop,
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
attr.headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
--
2.50.1