Message-Id: <20191107160834.21087-5-parav@mellanox.com>
Date:   Thu,  7 Nov 2019 10:08:20 -0600
From:   Parav Pandit <parav@...lanox.com>
To:     alex.williamson@...hat.com, davem@...emloft.net,
        kvm@...r.kernel.org, netdev@...r.kernel.org
Cc:     saeedm@...lanox.com, kwankhede@...dia.com, leon@...nel.org,
        cohuck@...hat.com, jiri@...lanox.com, linux-rdma@...r.kernel.org,
        Vu Pham <vuhuong@...lanox.com>,
        Parav Pandit <parav@...lanox.com>
Subject: [PATCH net-next 05/19] net/mlx5: E-Switch, Enable/disable SF's vport during SF life cycle

From: Vu Pham <vuhuong@...lanox.com>

Enable/disable the SF vport and its representors during the SF
allocation/free sequence, respectively.
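
For reference, a rough sketch of the resulting call flow during SF
allocation and free (function names are taken from the diff below;
pre-existing SF allocation/enable steps are elided):

  mlx5_sf_alloc()
    ... existing SF id allocation and SF HCA enable steps ...
    mlx5_eswitch_setup_sf_vport()         <- new
      esw_enable_sf_vport()                  (offloads ACL tables, MAC/GUID, QoS)
      esw_offloads_load_vport_reps()         (load representors)

  mlx5_sf_free()
    mlx5_eswitch_cleanup_sf_vport()       <- new
      esw_offloads_unload_vport_reps()       (unload representors)
      esw_disable_sf_vport()
    mlx5_core_disable_sf_hca()
    mlx5_cmd_dealloc_sf()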

Reviewed-by: Saeed Mahameed <saeedm@...lanox.com>
Signed-off-by: Vu Pham <vuhuong@...lanox.com>
Signed-off-by: Parav Pandit <parav@...lanox.com>
---
 .../net/ethernet/mellanox/mlx5/core/eswitch.c |  16 +--
 .../net/ethernet/mellanox/mlx5/core/eswitch.h |   7 ++
 .../mellanox/mlx5/core/eswitch_offloads.c     | 111 ++++++++++++++++++
 .../ethernet/mellanox/mlx5/core/meddev/sf.c   |   8 ++
 4 files changed, 134 insertions(+), 8 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 87273be44dae..1c763a5c955c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1532,9 +1532,9 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw)
 	esw->qos.enabled = false;
 }
 
-static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
-				struct mlx5_vport *vport,
-				u32 initial_max_rate, u32 initial_bw_share)
+int mlx5_eswitch_vport_enable_qos(struct mlx5_eswitch *esw,
+				  struct mlx5_vport *vport,
+				  u32 initial_max_rate, u32 initial_bw_share)
 {
 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
 	struct mlx5_core_dev *dev = esw->dev;
@@ -1573,8 +1573,8 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
 	return 0;
 }
 
-static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
-				  struct mlx5_vport *vport)
+void mlx5_eswitch_vport_disable_qos(struct mlx5_eswitch *esw,
+				    struct mlx5_vport *vport)
 {
 	int err;
 
@@ -1795,8 +1795,8 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
 		goto done;
 
 	/* Attach vport to the eswitch rate limiter */
-	if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
-				 vport->qos.bw_share))
+	if (mlx5_eswitch_vport_enable_qos(esw, vport, vport->info.max_rate,
+					  vport->qos.bw_share))
 		esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
 
 	/* Sync with current vport context */
@@ -1840,7 +1840,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
 	 */
 	esw_vport_change_handle_locked(vport);
 	vport->enabled_events = 0;
-	esw_vport_disable_qos(esw, vport);
+	mlx5_eswitch_vport_disable_qos(esw, vport);
 
 	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
 	    esw->mode == MLX5_ESWITCH_LEGACY)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 21592ef6d05d..6c2ea3bb39cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -298,6 +298,13 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 				 u16 vport,
 				 struct ifla_vf_stats *vf_stats);
+int mlx5_eswitch_vport_enable_qos(struct mlx5_eswitch *esw,
+				  struct mlx5_vport *vport,
+				  u32 initial_max_rate, u32 initial_bw_share);
+void mlx5_eswitch_vport_disable_qos(struct mlx5_eswitch *esw,
+				    struct mlx5_vport *vport);
+int mlx5_eswitch_setup_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_eswitch_cleanup_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);
 void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
 
 int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index ff084499d681..a6906bff37a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1624,6 +1624,117 @@ static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
 	return err;
 }
 
+static int esw_offloads_load_vport_reps(struct mlx5_eswitch *esw, u16 vport_num)
+{
+	struct mlx5_eswitch_rep *rep;
+	u8 rep_type;
+	int err;
+
+	rep = mlx5_eswitch_get_rep(esw, vport_num);
+	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+		err = __esw_offloads_load_rep(esw, rep, rep_type);
+		if (err) {
+			esw_warn(esw->dev, "Load vport(%d) rep type(%d) err!\n",
+				 vport_num, rep_type);
+			goto err_reps;
+		}
+	}
+
+	return 0;
+
+err_reps:
+	while (rep_type-- > 0)
+		__esw_offloads_unload_rep(esw, rep, rep_type);
+	return err;
+}
+
+static void
+esw_offloads_unload_vport_reps(struct mlx5_eswitch *esw, u16 vport_num)
+{
+	struct mlx5_eswitch_rep *rep;
+	u8 rep_type = NUM_REP_TYPES;
+
+	rep = mlx5_eswitch_get_rep(esw, vport_num);
+	while (rep_type-- > 0)
+		__esw_offloads_unload_rep(esw, rep, rep_type);
+}
+
+static int
+esw_enable_sf_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+	int ret;
+
+	ret = esw_vport_create_offloads_acl_tables(esw, vport);
+	if (ret)
+		return ret;
+
+	mutex_lock(&esw->state_lock);
+
+	mlx5_modify_nic_vport_mac_address(esw->dev, vport->vport, vport->info.mac);
+	mlx5_modify_nic_vport_node_guid(esw->dev, vport->vport,
+					vport->info.node_guid);
+
+	/* Attach vport to the eswitch rate limiter */
+	ret = mlx5_eswitch_vport_enable_qos(esw, vport, vport->info.max_rate,
+					    vport->qos.bw_share);
+	if (ret)
+		goto qos_err;
+
+	vport->enabled = true;
+	esw_debug(esw->dev, "Enabled SF vport(0x%x)\n", vport->vport);
+
+	mutex_unlock(&esw->state_lock);
+	return 0;
+
+qos_err:
+	mutex_unlock(&esw->state_lock);
+	esw_vport_destroy_offloads_acl_tables(esw, vport);
+	return ret;
+}
+
+static void
+esw_disable_sf_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+	mutex_lock(&esw->state_lock);
+
+	esw_debug(esw->dev, "Disabling vport(0x%x)\n", vport->vport);
+	vport->enabled = false;
+	mlx5_eswitch_vport_disable_qos(esw, vport);
+
+	mutex_unlock(&esw->state_lock);
+
+	esw_vport_destroy_offloads_acl_tables(esw, vport);
+}
+
+int mlx5_eswitch_setup_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+	int ret;
+
+	if (IS_ERR(vport))
+		return PTR_ERR(vport);
+
+	ret = esw_enable_sf_vport(esw, vport);
+	if (ret)
+		return ret;
+
+	ret = esw_offloads_load_vport_reps(esw, vport_num);
+	if (ret)
+		esw_disable_sf_vport(esw, vport);
+	return ret;
+}
+
+void mlx5_eswitch_cleanup_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+	if (IS_ERR(vport))
+		return;
+
+	esw_offloads_unload_vport_reps(esw, vport_num);
+	esw_disable_sf_vport(esw, vport);
+}
+
 #define ESW_OFFLOADS_DEVCOM_PAIR	(0)
 #define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/meddev/sf.c b/drivers/net/ethernet/mellanox/mlx5/core/meddev/sf.c
index d57109a9c53b..fb4ba7be0051 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/meddev/sf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/meddev/sf.c
@@ -7,6 +7,7 @@
 #include <linux/bitmap.h>
 #include "sf.h"
 #include "mlx5_core.h"
+#include "eswitch.h"
 
 static int
 mlx5_cmd_query_sf_partitions(struct mlx5_core_dev *mdev, u32 *out, int outlen)
@@ -149,11 +150,17 @@ mlx5_sf_alloc(struct mlx5_core_dev *coredev, struct mlx5_sf_table *sf_table,
 	if (ret)
 		goto enable_err;
 
+	ret = mlx5_eswitch_setup_sf_vport(coredev->priv.eswitch, hw_function_id);
+	if (ret)
+		goto vport_err;
+
 	sf->idx = sf_id;
 	sf->base_addr = sf_table->base_address +
 				(sf->idx << (sf_table->log_sf_bar_size + 12));
 	return sf;
 
+vport_err:
+	mlx5_core_disable_sf_hca(coredev, hw_function_id);
 enable_err:
 	mlx5_cmd_dealloc_sf(coredev, hw_function_id);
 alloc_sf_err:
@@ -169,6 +176,7 @@ void mlx5_sf_free(struct mlx5_core_dev *coredev, struct mlx5_sf_table *sf_table,
 	u16 hw_function_id;
 
 	hw_function_id = mlx5_sf_hw_id(coredev, sf->idx);
+	mlx5_eswitch_cleanup_sf_vport(coredev->priv.eswitch, hw_function_id);
 	mlx5_core_disable_sf_hca(coredev, hw_function_id);
 	mlx5_cmd_dealloc_sf(coredev, hw_function_id);
 	free_sf_id(sf_table, sf->idx);
-- 
2.19.2
