Message-ID: <20240529113854.14fd929e@kernel.org>
Date: Wed, 29 May 2024 11:38:54 -0700
From: Jakub Kicinski <kuba@...nel.org>
To: cratiu@...dia.com, rrameshbabu@...dia.com, Raed Salem <raeds@...dia.com>
Cc: Willem de Bruijn <willemdebruijn.kernel@...il.com>,
netdev@...r.kernel.org, pabeni@...hat.com, borisp@...dia.com,
gal@...dia.com, steffen.klassert@...unet.com, tariqt@...dia.com
Subject: Re: [RFC net-next 14/15] net/mlx5e: Add Rx data path offload

On Sun, 12 May 2024 21:54:38 -0400 Willem de Bruijn wrote:
> > + /* TBD: report errors as SW counters to ethtool, any further handling ? */
> > + switch (MLX5_NISP_METADATA_SYNDROM(nisp_meta_data)) {
> > + case MLX5E_NISP_OFFLOAD_RX_SYNDROME_DECRYPTED:
> > + if (psp_rcv(skb))
> > + netdev_warn_once(netdev, "PSP handling failed");
> > + skb->decrypted = 1;
>
> Do not set skb->decrypted if psp_rcv failed? But drop the packet and
> account the drop, likely.

nVidia folks, does this seem reasonable?

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.c
index 7ae3e8246d8f..8cf6a8daf721 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.c
@@ -172,22 +172,24 @@ static int psp_rcv(struct sk_buff *skb)
return 0;
}
-void mlx5e_nisp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+bool mlx5e_nisp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
struct mlx5_cqe64 *cqe)
{
u32 nisp_meta_data = be32_to_cpu(cqe->ft_metadata);
/* TBD: report errors as SW counters to ethtool, any further handling ? */
- switch (MLX5_NISP_METADATA_SYNDROM(nisp_meta_data)) {
- case MLX5E_NISP_OFFLOAD_RX_SYNDROME_DECRYPTED:
- if (psp_rcv(skb))
- netdev_warn_once(netdev, "PSP handling failed");
- skb->decrypted = 1;
- break;
- default:
- WARN_ON_ONCE(true);
- break;
- }
+ if (MLX5_NISP_METADATA_SYNDROM(nisp_meta_data) != MLX5E_NISP_OFFLOAD_RX_SYNDROME_DECRYPTED)
+ goto drop;
+
+ if (psp_rcv(skb))
+ goto drop;
+
+ skb->decrypted = 1;
+ return false;
+
+drop:
+ kfree_skb(skb);
+ return true;
}
void mlx5e_nisp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.h
index 834481232b21..1e13b09b3522 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.h
@@ -86,7 +86,7 @@ static inline bool mlx5e_nisp_is_rx_flow(struct mlx5_cqe64 *cqe)
return MLX5_NISP_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}
-void mlx5e_nisp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+bool mlx5e_nisp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
struct mlx5_cqe64 *cqe);
void mlx5e_nisp_csum_complete(struct net_device *netdev, struct sk_buff *skb);
@@ -113,10 +113,11 @@ static inline bool mlx5e_nisp_is_rx_flow(struct mlx5_cqe64 *cqe)
return false;
}
-static inline void mlx5e_nisp_offload_handle_rx_skb(struct net_device *netdev,
+static inline bool mlx5e_nisp_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5_cqe64 *cqe)
{
+ return false;
}
static inline void mlx5e_nisp_csum_complete(struct net_device *netdev, struct sk_buff *skb) { }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index ed3c7d8cf99d..22cf1c563844 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1552,7 +1552,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
#define MLX5E_CE_BIT_MASK 0x80
-static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct mlx5e_rq *rq,
struct sk_buff *skb)
@@ -1566,8 +1566,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(get_cqe_tls_offload(cqe)))
mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
- if (unlikely(mlx5e_nisp_is_rx_flow(cqe)))
- mlx5e_nisp_offload_handle_rx_skb(netdev, skb, cqe);
+ if (unlikely(mlx5e_nisp_is_rx_flow(cqe))) {
+ if (mlx5e_nisp_offload_handle_rx_skb(netdev, skb, cqe))
+ return true;
+ }
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
@@ -1612,9 +1614,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5e_skb_is_multicast(skb)))
stats->mcast_packets++;
+
+ return false;
}
-static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
+static bool mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct sk_buff *skb)
@@ -1626,16 +1630,20 @@ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
stats->bytes += cqe_bcnt;
stats->gro_bytes += cqe_bcnt;
if (NAPI_GRO_CB(skb)->count != 1)
- return;
- mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ return false;
+
+ if (mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb))
+ return true;
+
skb_reset_network_header(skb);
if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
napi_gro_receive(rq->cq.napi, skb);
rq->hw_gro_data->skb = NULL;
}
+ return false;
}
-static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+static inline bool mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct sk_buff *skb)
@@ -1644,7 +1652,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
stats->packets++;
stats->bytes += cqe_bcnt;
- mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ return mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
static inline
@@ -1858,7 +1866,8 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_cyc_pop;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
@@ -1905,7 +1914,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_cyc_pop;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
@@ -1954,7 +1964,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto mpwrq_cqe_out;
mlx5e_rep_tc_receive(cqe, rq, skb);
@@ -2375,7 +2386,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
}
- mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
+ if (mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb)) {
+ *skb = NULL;
+ goto free_hd_entry;
+ }
if (flush)
mlx5e_shampo_flush_skb(rq, cqe, match);
free_hd_entry:
@@ -2429,7 +2443,8 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto mpwrq_cqe_out;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
@@ -2762,7 +2777,8 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
if (!skb)
goto wq_cyc_pop;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
skb_push(skb, ETH_HLEN);
mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
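
To also account the drop as suggested above, the drop path could bump a
per-queue SW counter before freeing. Just a sketch, not part of the diff:
it assumes the rq (or its stats) gets plumbed into the nisp handler and
that a new drop counter is added to the ethtool SW stats, neither of
which exists yet:

drop:
	/* hypothetical counter, would need a matching mlx5e_rq_stats /
	 * mlx5e_sw_stats entry and an rq argument passed down
	 */
	rq->stats->nisp_drop++;
	kfree_skb(skb);
	return true;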