Message-ID: <151396270467.20006.15724257628398704297.stgit@firesoul>
Date: Fri, 22 Dec 2017 18:11:44 +0100
From: Jesper Dangaard Brouer <brouer@...hat.com>
To: Daniel Borkmann <borkmann@...earbox.net>,
Alexei Starovoitov <alexei.starovoitov@...il.com>
Cc: netdev@...r.kernel.org, dsahern@...il.com,
Matan Barak <matanb@...lanox.com>,
Saeed Mahameed <saeedm@...lanox.com>,
Jesper Dangaard Brouer <brouer@...hat.com>,
gospo@...adcom.com, bjorn.topel@...el.com,
michael.chan@...adcom.com, Tariq Toukan <tariqt@...lanox.com>
Subject: [bpf-next V2 PATCH 02/14] xdp/mlx5: setup xdp_rxq_info
The mlx5 driver has a special drop-RQ (one per interface) that
simply drops all incoming traffic. It helps the driver keep other HW
objects (flow steering) alive across down/up operations. Flow
steering objects temporarily point at it during interface setup, and
while the interface is down. It lacks many fields that are set in a
regular RQ (for example, its state is never switched to
MLX5_RQC_STATE_RDY). (Thanks to Tariq Toukan for the explanation.)

The XDP RX-queue info for this drop-RQ is marked as unused, which
allows us to use the same takedown/free code path as for the other
RX-queues.
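For context, a minimal sketch of why the unused marking makes the
shared takedown path safe, assuming the reg_state semantics from the
xdp_rxq_info core patch earlier in this series (simplified
illustration, not the authoritative implementation):

/* Simplified sketch based on the xdp_rxq_info core API from earlier
 * in this series (net/core/xdp.c); illustration only. Marking the
 * queue "unused" moves it out of REG_STATE_NEW, so the unconditional
 * xdp_rxq_info_unreg() call in the common free path is also valid
 * for the drop-RQ.
 */
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}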
Driver hook points for xdp_rxq_info:
* reg : mlx5e_alloc_rq()
* unused: mlx5e_alloc_drop_rq()
* unreg : mlx5e_free_rq()
Tested on actual hardware with a samples/bpf program.
Cc: Saeed Mahameed <saeedm@...lanox.com>
Cc: Matan Barak <matanb@...lanox.com>
Cc: Tariq Toukan <tariqt@...lanox.com>
Signed-off-by: Jesper Dangaard Brouer <brouer@...hat.com>
---
drivers/net/ethernet/mellanox/mlx5/core/en.h | 4 ++++
drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 10 ++++++++++
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 1 +
3 files changed, 15 insertions(+)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c0872b3284cb..405b09db2a84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -46,6 +46,7 @@
#include <linux/mlx5/transobj.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
+#include <net/xdp.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
@@ -568,6 +569,9 @@ struct mlx5e_rq {
u32 rqn;
struct mlx5_core_dev *mdev;
struct mlx5_core_mkey umr_mkey;
+
+ /* XDP read-mostly */
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
struct mlx5e_channel {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 0f5c012de52e..128cb9a32549 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -589,6 +589,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
+ if (err < 0)
+ goto err_rq_wq_destroy;
+
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq->buff.headroom = params->rq_headroom;
@@ -695,6 +699,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err_rq_wq_destroy:
if (rq->xdp_prog)
bpf_prog_put(rq->xdp_prog);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
mlx5_wq_destroy(&rq->wq_ctrl);
return err;
@@ -707,6 +712,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
if (rq->xdp_prog)
bpf_prog_put(rq->xdp_prog);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_mpwqe_info(rq);
@@ -2768,6 +2775,9 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
if (err)
return err;
+ /* Mark as unused given "Drop-RQ" packets never reach XDP */
+ xdp_rxq_info_unused(&rq->xdp_rxq);
+
rq->mdev = mdev;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 5b499c7a698f..7b38480811d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -812,6 +812,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
xdp.data_hard_start = va;
+ xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(prog, &xdp);
switch (act) {
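Once xdp.rxq is populated as above, an XDP program can consume the
per-queue info through the xdp_md fields exposed by the final patch
in this series (the field names ingress_ifindex and rx_queue_index
are assumptions based on that patch). A minimal sketch of such a
program:

/* Minimal XDP program sketch; assumes the xdp_md fields exposed at
 * the end of this patch series. Build with: clang -O2 -target bpf -c.
 */
#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

SEC("xdp")
int xdp_rxq_demo(struct xdp_md *ctx)
{
	/* Rewritten by the verifier to read xdp.rxq->queue_index */
	__u32 rx_queue = ctx->rx_queue_index;

	/* Example policy: drop all packets arriving on RX-queue 0 */
	if (rx_queue == 0)
		return XDP_DROP;

	return XDP_PASS;
}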