Message-Id: <20190216013452.21131-5-saeedm@mellanox.com>
Date: Fri, 15 Feb 2019 17:34:43 -0800
From: Saeed Mahameed <saeedm@...lanox.com>
To: "David S. Miller" <davem@...emloft.net>
Cc: netdev@...r.kernel.org, Bodong Wang <bodong@...lanox.com>,
Or Gerlitz <ogerlitz@...lanox.com>,
Saeed Mahameed <saeedm@...lanox.com>
Subject: [net-next 04/13] net/mlx5: E-Switch, Refactor offloads flow steering init/cleanup
From: Bodong Wang <bodong@...lanox.com>
E-switch offloads mode initializes and cleans up multiple steering-related
entities (flow tables/groups). Refactor these operations into internal
helper functions for a better building-block design.

This patch doesn't change any functionality.
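
For readers skimming the diff, the shape of the result is an init/cleanup
helper pair plus a top-level init that composes them. The following is a
minimal, self-contained C sketch of that pattern only; the function names
and stub bodies are illustrative, not the actual mlx5 symbols (the real
helpers in the diff below operate on struct mlx5_eswitch * and nvports):

#include <stdio.h>

/* Stubbed-out steering objects; the real code creates FDB tables,
 * an offloads flow table and a vport RX flow group.
 */
static int create_tables(void)    { puts("create steering tables"); return 0; }
static int create_rx_group(void)  { puts("create vport rx group");  return 0; }
static void destroy_rx_group(void){ puts("destroy vport rx group"); }
static void destroy_tables(void)  { puts("destroy steering tables"); }
static int load_reps(void)        { puts("load representors");      return 0; }

/* Helper #1: create all steering entities, unwinding on partial failure. */
static int steering_init(void)
{
	int err;

	err = create_tables();
	if (err)
		return err;

	err = create_rx_group();
	if (err)
		goto err_rx_group;

	return 0;

err_rx_group:
	destroy_tables();
	return err;
}

/* Helper #2: mirror image of steering_init(), tearing down in reverse order. */
static void steering_cleanup(void)
{
	destroy_rx_group();
	destroy_tables();
}

/* Top-level init composes the helpers, so a later failure (loading
 * representors) unwinds through one call instead of repeating the
 * individual destroy calls.
 */
static int offloads_init(void)
{
	int err = steering_init();

	if (err)
		return err;

	err = load_reps();
	if (err) {
		steering_cleanup();
		return err;
	}
	return 0;
}

int main(void)
{
	return offloads_init() ? 1 : 0;
}
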
Signed-off-by: Bodong Wang <bodong@...lanox.com>
Reviewed-by: Or Gerlitz <ogerlitz@...lanox.com>
Signed-off-by: Saeed Mahameed <saeedm@...lanox.com>
---
.../mellanox/mlx5/core/eswitch_offloads.c | 43 +++++++++++++------
1 file changed, 31 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index af2c44d31357..19969d487a01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1404,7 +1404,7 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
@@ -1422,16 +1422,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
if (err)
goto create_fg_err;
- err = esw_offloads_load_reps(esw, nvports);
- if (err)
- goto err_reps;
-
- esw_offloads_devcom_init(esw);
return 0;
-err_reps:
- esw_destroy_vport_rx_group(esw);
-
create_fg_err:
esw_destroy_offloads_table(esw);
@@ -1441,6 +1433,35 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
return err;
}
+static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
+{
+ esw_destroy_vport_rx_group(esw);
+ esw_destroy_offloads_table(esw);
+ esw_destroy_offloads_fdb_tables(esw);
+}
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+{
+ int err;
+
+ mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
+
+ err = esw_offloads_steering_init(esw, nvports);
+ if (err)
+ return err;
+
+ err = esw_offloads_load_reps(esw, nvports);
+ if (err)
+ goto err_reps;
+
+ esw_offloads_devcom_init(esw);
+ return 0;
+
+err_reps:
+ esw_offloads_steering_cleanup(esw);
+ return err;
+}
+
static int esw_offloads_stop(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
@@ -1464,9 +1485,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_reps(esw, nvports);
- esw_destroy_vport_rx_group(esw);
- esw_destroy_offloads_table(esw);
- esw_destroy_offloads_fdb_tables(esw);
+ esw_offloads_steering_cleanup(esw);
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
--
2.20.1