Message-ID: <20190729211209.14772-12-saeedm@mellanox.com>
Date: Mon, 29 Jul 2019 21:13:12 +0000
From: Saeed Mahameed <saeedm@...lanox.com>
To: Saeed Mahameed <saeedm@...lanox.com>,
Leon Romanovsky <leonro@...lanox.com>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"linux-rdma@...r.kernel.org" <linux-rdma@...r.kernel.org>
CC: Parav Pandit <parav@...lanox.com>
Subject: [PATCH mlx5-next 11/11] net/mlx5: E-switch, Tidy up eswitch config
sequence
From: Parav Pandit <parav@...lanox.com>

Currently, for PF and ECPF vports, representors are created before
their eswitch hardware ports are initialized, in the flow below:

mlx5_eswitch_enable()
  esw_offloads_init()
    esw_offloads_load_all_reps()
[..]
esw_enable_vport()

However, for VFs, vports are initialized before their respective netdev
representors are created, in the event handling context.

Similarly, while disabling the eswitch, hardware vports are disabled
first, followed by destroying their representors.

As a result, an underlying vport can already be destroyed while its
user-facing netdevice still exists, and the user can continue to
perform offload operations on it.

Instead, it is more accurate to do the following (a simplified sketch
of the resulting flow is shown after these lists):

enable_eswitch switchdev mode:
1. perform FDB tables initialization
2. initialize hw vport
3. create and publish representor for this vport

disable_eswitch switchdev mode:
1. destroy user facing representor for the vport
2. disable hw vport
3. perform FDB tables cleanup
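
For illustration only, a simplified sketch of the reordered switchdev
flow (pseudocode, not the exact driver code; esw_offloads_steering_init()
is assumed here as the counterpart of the esw_offloads_steering_cleanup()
used in the diff below):

/* enable: FDB/steering first, then hw vports, then representors */
static int sketch_offloads_enable(struct mlx5_eswitch *esw)
{
	int err;

	err = esw_offloads_steering_init(esw);	/* 1. FDB tables */
	if (err)
		return err;

	/* 2. bring up the hardware vports */
	mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);

	err = esw_offloads_load_all_reps(esw);	/* 3. representors */
	if (err) {
		mlx5_eswitch_disable_pf_vf_vports(esw);
		esw_offloads_steering_cleanup(esw);
	}
	return err;
}

/* disable: exact reverse order */
static void sketch_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_unload_all_reps(esw);	/* 1. representors */
	mlx5_eswitch_disable_pf_vf_vports(esw);	/* 2. hw vports */
	esw_offloads_steering_cleanup(esw);	/* 3. FDB tables */
}

The actual patch wires this ordering into esw_legacy_enable()/
esw_legacy_disable() and esw_offloads_enable()/esw_offloads_disable(),
as shown in the diff below.
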
Signed-off-by: Parav Pandit <parav@...lanox.com>
Signed-off-by: Saeed Mahameed <saeedm@...lanox.com>
---
.../net/ethernet/mellanox/mlx5/core/eswitch.c | 54 +++++++++++--------
.../net/ethernet/mellanox/mlx5/core/eswitch.h | 4 +-
.../mellanox/mlx5/core/eswitch_offloads.c | 8 ++-
3 files changed, 41 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 90d150be237b..d4465dd18c11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -452,6 +452,22 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
return err;
}
+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+ MLX5_VPORT_MC_ADDR_CHANGE | \
+ MLX5_VPORT_PROMISC_CHANGE)
+
+static int esw_legacy_enable(struct mlx5_eswitch *esw)
+{
+ int ret;
+
+ ret = esw_create_legacy_table(esw);
+ if (ret)
+ return ret;
+
+ mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+ return 0;
+}
+
static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
esw_cleanup_vepa_rules(esw);
@@ -459,6 +475,19 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
esw_destroy_legacy_vepa_table(esw);
}
+static void esw_legacy_disable(struct mlx5_eswitch *esw)
+{
+ struct esw_mc_addr *mc_promisc;
+
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+
+ mc_promisc = &esw->mc_promisc;
+ if (mc_promisc->uplink_rule)
+ mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+ esw_destroy_legacy_table(esw);
+}
+
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
struct vport_addr *vaddr);
@@ -1826,13 +1855,8 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
esw_disable_vport(esw, vport);
}
-#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
- MLX5_VPORT_MC_ADDR_CHANGE | \
- MLX5_VPORT_PROMISC_CHANGE)
-
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
{
- int enabled_events;
int err;
if (!ESW_ALLOWED(esw) ||
@@ -1854,21 +1878,16 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
mlx5_lag_update(esw->dev);
if (mode == MLX5_ESWITCH_LEGACY) {
- err = esw_create_legacy_table(esw);
+ err = esw_legacy_enable(esw);
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- err = esw_offloads_init(esw);
+ err = esw_offloads_enable(esw);
}
if (err)
goto abort;
- enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? MLX5_LEGACY_SRIOV_VPORT_EVENTS :
- MLX5_VPORT_UC_ADDR_CHANGE;
-
- mlx5_eswitch_enable_pf_vf_vports(esw, enabled_events);
-
mlx5_eswitch_event_handlers_register(esw);
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
@@ -1890,7 +1909,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
- struct esw_mc_addr *mc_promisc;
int old_mode;
if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
@@ -1902,16 +1920,10 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
mlx5_eswitch_event_handlers_unregister(esw);
- mlx5_eswitch_disable_pf_vf_vports(esw);
-
- mc_promisc = &esw->mc_promisc;
- if (mc_promisc->uplink_rule)
- mlx5_del_flow_rules(mc_promisc->uplink_rule);
-
if (esw->mode == MLX5_ESWITCH_LEGACY)
- esw_destroy_legacy_table(esw);
+ esw_legacy_disable(esw);
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
- esw_offloads_cleanup(esw);
+ esw_offloads_disable(esw);
esw_destroy_tsar(esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 51b6d29466f1..d447e1e44d59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -242,8 +242,8 @@ struct mlx5_eswitch {
struct mlx5_esw_functions esw_funcs;
};
-void esw_offloads_cleanup(struct mlx5_eswitch *esw);
-int esw_offloads_init(struct mlx5_eswitch *esw);
+void esw_offloads_disable(struct mlx5_eswitch *esw);
+int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 4be19890f725..db01b8ee9385 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2104,7 +2104,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
return NOTIFY_OK;
}
-int esw_offloads_init(struct mlx5_eswitch *esw)
+int esw_offloads_enable(struct mlx5_eswitch *esw)
{
int err;
@@ -2122,6 +2122,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
if (err)
goto err_vport_metadata;
+ mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
+
err = esw_offloads_load_all_reps(esw);
if (err)
goto err_reps;
@@ -2134,6 +2136,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
return 0;
err_reps:
+ mlx5_eswitch_disable_pf_vf_vports(esw);
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_steering_cleanup(esw);
@@ -2159,11 +2162,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
return err;
}
-void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+void esw_offloads_disable(struct mlx5_eswitch *esw)
{
mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw);
+ mlx5_eswitch_disable_pf_vf_vports(esw);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
--
2.21.0