Message-Id: <20220318205248.33367-5-saeed@kernel.org>
Date: Fri, 18 Mar 2022 13:52:37 -0700
From: Saeed Mahameed <saeed@...nel.org>
To: "David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>
Cc: netdev@...r.kernel.org, Maxim Mikityanskiy <maximmi@...dia.com>,
Tariq Toukan <tariqt@...dia.com>,
Saeed Mahameed <saeedm@...dia.com>
Subject: [net-next 04/15] net/mlx5e: Add XDP multi buffer support to the non-linear legacy RQ

From: Maxim Mikityanskiy <maximmi@...dia.com>

This commit adds XDP multi buffer support to the RX path in the
non-linear legacy RQ mode. mlx5e_xdp_handle is called from
mlx5e_skb_from_cqe_nonlinear.
The XDP_TX action is not yet supported for fragmented XDP frames and is
blocked: mlx5e_xmit_xdp_buff drops such frames.

Signed-off-by: Maxim Mikityanskiy <maximmi@...dia.com>
Reviewed-by: Tariq Toukan <tariqt@...dia.com>
Signed-off-by: Saeed Mahameed <saeedm@...dia.com>
---
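Not part of this patch, just a hedged illustration for reviewers: a minimal
frags-aware XDP program of the kind this RQ mode can now run. The program
name and the 3000-byte threshold are made-up placeholders; SEC("xdp.frags")
is the libbpf attach convention for multi-buffer programs, and
bpf_xdp_get_buff_len() reports the full multi-buffer length.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only, not part of this series. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp.frags")
int xdp_mb_example(struct xdp_md *ctx)
{
	/* Total length including fragments, not only the linear part
	 * (ctx->data_end - ctx->data).
	 */
	__u64 total_len = bpf_xdp_get_buff_len(ctx);

	/* Drop oversized packets, pass the rest. Returning XDP_TX for a
	 * fragmented frame would be rejected by the driver, see the
	 * mlx5e_xmit_xdp_buff hunk below.
	 */
	if (total_len > 3000)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
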
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 5 +++++
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 13 +++++++++++++
2 files changed, 18 insertions(+)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 6aa77f0e094e..3a837030e96e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -68,6 +68,11 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
if (unlikely(!xdpf))
return false;
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ xdp_return_frame(xdpf);
+ return false;
+ }
+
xdptxd.data = xdpf->data;
xdptxd.len = xdpf->len;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index dd8ff62e1693..e9ad5b3a30ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1572,6 +1572,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_dma_info *di = wi->di;
struct skb_shared_info *sinfo;
u32 frag_consumed_bytes;
+ struct bpf_prog *prog;
struct xdp_buff xdp;
struct sk_buff *skb;
u32 truesize;
@@ -1582,6 +1583,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
rq->buff.frame0_sz, DMA_FROM_DEVICE);
+ net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
@@ -1629,6 +1631,17 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
di = head_wi->di;
+ prog = rcu_dereference(rq->xdp_prog);
+ if (prog && mlx5e_xdp_handle(rq, di, prog, &xdp)) {
+ if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+ int i;
+
+ for (i = wi - head_wi; i < rq->wqe.info.num_frags; i++)
+ mlx5e_put_rx_frag(rq, &head_wi[i], true);
+ }
+ return NULL; /* page/packet was consumed by XDP */
+ }
+
skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
xdp.data - xdp.data_hard_start,
xdp.data_end - xdp.data,
--
2.35.1