Message-Id: <1449612225-30789-7-git-send-email-saeedm@mellanox.com>
Date:	Wed,  9 Dec 2015 00:03:44 +0200
From:	Saeed Mahameed <saeedm@...lanox.com>
To:	"David S. Miller" <davem@...emloft.net>
Cc:	netdev@...r.kernel.org, Or Gerlitz <ogerlitz@...lanox.com>,
	Maor Gottlieb <maorg@...lanox.com>,
	Tal Alon <talal@...lanox.com>,
	Majd Dibbiny <majd@...lanox.com>,
	Matan Barak <matanb@...lanox.com>, saeedm@....mellanox.co.il,
	Moni Shoua <monis@...lanox.com>,
	Saeed Mahameed <saeedm@...lanox.com>
Subject: [PATCH net-next 6/7] net/mlx5: Use flow steering infrastructure for mlx5_en

From: Maor Gottlieb <maorg@...lanox.com>

Expose the new flow steering API and remove the old
one.

A few changes are required:

1. The Ethernet flow steering follows the existing implementation, but uses
the new steering API (a short usage sketch is shown after this list). The old
flow steering implementation is removed.

2. Move the E-switch FDB management to use the new API.

3. When the driver is loaded, call mlx5_init_fs, which initializes the flow
steering tree structure and opens namespaces for NIC receive and for the
E-switch FDB.

4. Call mlx5_cleanup_fs when the driver is unloaded.
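
For reference, a minimal sketch of how the new API is consumed. This is
illustrative only: mdev, table_size, tirn, mc and mv (fte_match_param
criteria/value buffers) are placeholders for driver state, and IS_ERR()
checks and error unwinding are trimmed.

    struct mlx5_flow_namespace *ns;
    struct mlx5_flow_table *ft;
    struct mlx5_flow_group *fg;
    struct mlx5_flow_rule *rule;
    struct mlx5_flow_destination dest;
    u32 *fg_in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in));

    /* open a steering namespace (NIC RX here, MLX5_FLOW_NAMESPACE_FDB
     * for the E-switch)
     */
    ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_KERNEL);

    /* create a flow table and a flow group spanning part of it */
    ft = mlx5_create_flow_table(ns, 0, table_size);
    MLX5_SET(create_flow_group_in, fg_in, match_criteria_enable,
             MLX5_MATCH_OUTER_HEADERS);
    MLX5_SET(create_flow_group_in, fg_in, start_flow_index, 0);
    MLX5_SET(create_flow_group_in, fg_in, end_flow_index, table_size - 1);
    fg = mlx5_create_flow_group(ft, fg_in);

    /* add a rule; destinations are typed (TIR, flow table or vport) */
    dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
    dest.tir_num = tirn;
    rule = mlx5_add_flow_rule(ft, MLX5_MATCH_OUTER_HEADERS, mc, mv,
                              MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                              MLX5_FS_DEFAULT_FLOW_TAG, &dest);

    /* teardown mirrors creation */
    mlx5_del_flow_rule(rule);
    mlx5_destroy_flow_group(fg);
    mlx5_destroy_flow_table(ft);
    kvfree(fg_in);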

Signed-off-by: Maor Gottlieb <maorg@...lanox.com>
Signed-off-by: Moni Shoua <monis@...lanox.com>
Signed-off-by: Matan Barak <matanb@...lanox.com>
Signed-off-by: Saeed Mahameed <saeedm@...lanox.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/Makefile   |    2 +-
 drivers/net/ethernet/mellanox/mlx5/core/en.h       |   23 +-
 .../ethernet/mellanox/mlx5/core/en_flow_table.c    |  824 ++++++++++++--------
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c  |    2 +-
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c  |  291 ++------
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h  |   15 +-
 .../net/ethernet/mellanox/mlx5/core/flow_table.c   |  422 ----------
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c  |   26 +-
 drivers/net/ethernet/mellanox/mlx5/core/main.c     |    9 +
 include/linux/mlx5/flow_table.h                    |   63 --
 include/linux/mlx5/fs.h                            |   38 +
 11 files changed, 633 insertions(+), 1082 deletions(-)
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
 delete mode 100644 include/linux/mlx5/flow_table.h

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 7fc5e23..11ee062 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -3,6 +3,6 @@ obj-$(CONFIG_MLX5_CORE)		+= mlx5_core.o
 mlx5_core-y :=	main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 		health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o   \
 		mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o
-mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o eswitch.o \
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
 		en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \
 		en_txrx.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 89313d4..f689ce5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -64,6 +64,8 @@
 #define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
 #define MLX5E_SQ_BF_BUDGET             16
 
+#define MLX5E_NUM_MAIN_GROUPS 9
+
 static const char vport_strings[][ETH_GSTRING_LEN] = {
 	/* vport statistics */
 	"rx_packets",
@@ -442,7 +444,7 @@ enum mlx5e_rqt_ix {
 struct mlx5e_eth_addr_info {
 	u8  addr[ETH_ALEN + 2];
 	u32 tt_vec;
-	u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */
+	struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT];
 };
 
 #define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
@@ -466,15 +468,22 @@ enum {
 
 struct mlx5e_vlan_db {
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-	u32           active_vlans_ft_ix[VLAN_N_VID];
-	u32           untagged_rule_ft_ix;
-	u32           any_vlan_rule_ft_ix;
+	struct mlx5_flow_rule	*active_vlans_rule[VLAN_N_VID];
+	struct mlx5_flow_rule	*untagged_rule;
+	struct mlx5_flow_rule	*any_vlan_rule;
 	bool          filter_disabled;
 };
 
 struct mlx5e_flow_table {
-	void *vlan;
-	void *main;
+	int num_groups;
+	struct mlx5_flow_table		*t;
+	struct mlx5_flow_group		**g;
+};
+
+struct mlx5e_flow_tables {
+	struct mlx5_flow_namespace	*ns;
+	struct mlx5e_flow_table		vlan;
+	struct mlx5e_flow_table		main;
 };
 
 struct mlx5e_priv {
@@ -497,7 +506,7 @@ struct mlx5e_priv {
 	u32                        rqtn[MLX5E_NUM_RQT];
 	u32                        tirn[MLX5E_NUM_TT];
 
-	struct mlx5e_flow_table    ft;
+	struct mlx5e_flow_tables   fts;
 	struct mlx5e_eth_addr_db   eth_addr;
 	struct mlx5e_vlan_db       vlan;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
index 5b93c9c..80d81ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -34,9 +34,11 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
-#include <linux/mlx5/flow_table.h>
+#include <linux/mlx5/fs.h>
 #include "en.h"
 
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+
 enum {
 	MLX5E_FULLMATCH = 0,
 	MLX5E_ALLMULTI  = 1,
@@ -103,44 +105,38 @@ static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
 static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
 					       struct mlx5e_eth_addr_info *ai)
 {
-	void *ft = priv->ft.main;
-
 	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
-		mlx5_del_flow_table_entry(ft,
-					  ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
+		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
 
 	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
-		mlx5_del_flow_table_entry(ft,
-					  ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
+		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
 
 	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
-		mlx5_del_flow_table_entry(ft,
-					  ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
+		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
 
 	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
-		mlx5_del_flow_table_entry(ft,
-					  ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
+		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
 
 	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
-		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);
 
 	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
-		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);
 
 	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
-		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);
 
 	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
-		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);
 
 	if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
-		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
+		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);
 
 	if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
-		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
+		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);
 
 	if (ai->tt_vec & BIT(MLX5E_TT_ANY))
-		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
+		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
 }
 
 static int mlx5e_get_eth_addr_type(u8 *addr)
@@ -240,44 +236,34 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
 }
 
 static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
-				     struct mlx5e_eth_addr_info *ai, int type,
-				     void *flow_context, void *match_criteria)
+				     struct mlx5e_eth_addr_info *ai,
+				     int type, u32 *mc, u32 *mv)
 {
+	struct mlx5_flow_destination dest;
 	u8 match_criteria_enable = 0;
-	void *match_value;
-	void *dest;
-	u8   *dmac;
-	u8   *match_criteria_dmac;
-	void *ft   = priv->ft.main;
-	u32  *tirn = priv->tirn;
-	u32  *ft_ix;
-	u32  tt_vec;
-	int  err;
-
-	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
-	dmac = MLX5_ADDR_OF(fte_match_param, match_value,
-			    outer_headers.dmac_47_16);
-	match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
-					   outer_headers.dmac_47_16);
-	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
-
-	MLX5_SET(flow_context, flow_context, action,
-		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
-	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
-	MLX5_SET(dest_format_struct, dest, destination_type,
-		 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
+	struct mlx5_flow_rule **rule_p;
+	struct mlx5_flow_table *ft = priv->fts.main.t;
+	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
+				   outer_headers.dmac_47_16);
+	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
+				   outer_headers.dmac_47_16);
+	u32 *tirn = priv->tirn;
+	u32 tt_vec;
+	int err = 0;
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 
 	switch (type) {
 	case MLX5E_FULLMATCH:
 		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-		memset(match_criteria_dmac, 0xff, ETH_ALEN);
-		ether_addr_copy(dmac, ai->addr);
+		eth_broadcast_addr(mc_dmac);
+		ether_addr_copy(mv_dmac, ai->addr);
 		break;
 
 	case MLX5E_ALLMULTI:
 		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-		match_criteria_dmac[0] = 0x01;
-		dmac[0] = 0x01;
+		mc_dmac[0] = 0x01;
+		mv_dmac[0] = 0x01;
 		break;
 
 	case MLX5E_PROMISC:
@@ -286,190 +272,165 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
 
 	tt_vec = mlx5e_get_tt_vec(ai, type);
 
-	ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
 	if (tt_vec & BIT(MLX5E_TT_ANY)) {
-		MLX5_SET(dest_format_struct, dest, destination_id,
-			 tirn[MLX5E_TT_ANY]);
-		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-						match_criteria, flow_context,
-						ft_ix);
-		if (err)
+		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
+		dest.tir_num = tirn[MLX5E_TT_ANY];
+		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+		if (IS_ERR_OR_NULL(*rule_p))
 			goto err_del_ai;
-
 		ai->tt_vec |= BIT(MLX5E_TT_ANY);
 	}
 
 	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
-			 outer_headers.ethertype);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
 
-	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
 	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
-		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
+		dest.tir_num = tirn[MLX5E_TT_IPV4];
+		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
 			 ETH_P_IP);
-		MLX5_SET(dest_format_struct, dest, destination_id,
-			 tirn[MLX5E_TT_IPV4]);
-		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-						match_criteria, flow_context,
-						ft_ix);
-		if (err)
+		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+		if (IS_ERR_OR_NULL(*rule_p))
 			goto err_del_ai;
-
 		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
 	}
 
-	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
 	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
-		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
+		dest.tir_num = tirn[MLX5E_TT_IPV6];
+		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
 			 ETH_P_IPV6);
-		MLX5_SET(dest_format_struct, dest, destination_id,
-			 tirn[MLX5E_TT_IPV6]);
-		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-						match_criteria, flow_context,
-						ft_ix);
-		if (err)
+		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+		if (IS_ERR_OR_NULL(*rule_p))
 			goto err_del_ai;
-
 		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
 	}
 
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
-			 outer_headers.ip_protocol);
-	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
-		 IPPROTO_UDP);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
 
-	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
 	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
-		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
+		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
+		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
 			 ETH_P_IP);
-		MLX5_SET(dest_format_struct, dest, destination_id,
-			 tirn[MLX5E_TT_IPV4_UDP]);
-		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-						match_criteria, flow_context,
-						ft_ix);
-		if (err)
+		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+		if (IS_ERR_OR_NULL(*rule_p))
 			goto err_del_ai;
-
 		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
 	}
 
-	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
 	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
-		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
+		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
+		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
 			 ETH_P_IPV6);
-		MLX5_SET(dest_format_struct, dest, destination_id,
-			 tirn[MLX5E_TT_IPV6_UDP]);
-		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-						match_criteria, flow_context,
-						ft_ix);
-		if (err)
+		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+		if (IS_ERR_OR_NULL(*rule_p))
 			goto err_del_ai;
-
 		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
 	}
 
-	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
-		 IPPROTO_TCP);
+	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
 
-	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
 	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
-		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
+		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
+		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
 			 ETH_P_IP);
-		MLX5_SET(dest_format_struct, dest, destination_id,
-			 tirn[MLX5E_TT_IPV4_TCP]);
-		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-						match_criteria, flow_context,
-						ft_ix);
-		if (err)
+		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+		if (IS_ERR_OR_NULL(*rule_p))
 			goto err_del_ai;
-
 		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
 	}
 
-	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
 	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
-		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
+		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
+		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
 			 ETH_P_IPV6);
-		MLX5_SET(dest_format_struct, dest, destination_id,
-			 tirn[MLX5E_TT_IPV6_TCP]);
-		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-						match_criteria, flow_context,
-						ft_ix);
-		if (err)
+		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+		if (IS_ERR_OR_NULL(*rule_p))
 			goto err_del_ai;
 
 		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
 	}
 
-	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
-		 IPPROTO_AH);
+	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
 
-	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
 	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
-		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
+		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
+		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
 			 ETH_P_IP);
-		MLX5_SET(dest_format_struct, dest, destination_id,
-			 tirn[MLX5E_TT_IPV4_IPSEC_AH]);
-		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-						match_criteria, flow_context,
-						ft_ix);
-		if (err)
+		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+		if (IS_ERR_OR_NULL(*rule_p))
 			goto err_del_ai;
-
 		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
 	}
 
-	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
 	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
-		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
+		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
+		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
 			 ETH_P_IPV6);
-		MLX5_SET(dest_format_struct, dest, destination_id,
-			 tirn[MLX5E_TT_IPV6_IPSEC_AH]);
-		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-						match_criteria, flow_context,
-						ft_ix);
-		if (err)
+		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+		if (IS_ERR_OR_NULL(*rule_p))
 			goto err_del_ai;
-
 		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
 	}
 
-	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
-		 IPPROTO_ESP);
+	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
 
-	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
 	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
-		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
+		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
+		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
 			 ETH_P_IP);
-		MLX5_SET(dest_format_struct, dest, destination_id,
-			 tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
-		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-						match_criteria, flow_context,
-						ft_ix);
-		if (err)
+		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+		if (IS_ERR_OR_NULL(*rule_p))
 			goto err_del_ai;
-
 		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
 	}
 
-	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
 	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
-		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
+		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
+		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
 			 ETH_P_IPV6);
-		MLX5_SET(dest_format_struct, dest, destination_id,
-			 tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
-		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-						match_criteria, flow_context,
-						ft_ix);
-		if (err)
+		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+		if (IS_ERR_OR_NULL(*rule_p))
 			goto err_del_ai;
-
 		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
 	}
 
 	return 0;
 
 err_del_ai:
+	err = PTR_ERR(*rule_p);
+	*rule_p = NULL;
 	mlx5e_del_eth_addr_from_flow_table(priv, ai);
 
 	return err;
@@ -478,27 +439,25 @@ err_del_ai:
 static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
 				   struct mlx5e_eth_addr_info *ai, int type)
 {
-	u32 *flow_context;
 	u32 *match_criteria;
-	int err;
+	u32 *match_value;
+	int err = 0;
 
-	flow_context   = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
-				      MLX5_ST_SZ_BYTES(dest_format_struct));
-	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-	if (!flow_context || !match_criteria) {
+	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+	if (!match_value || !match_criteria) {
 		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
 		err = -ENOMEM;
 		goto add_eth_addr_rule_out;
 	}
 
-	err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
-					match_criteria);
-	if (err)
-		netdev_err(priv->netdev, "%s: failed\n", __func__);
+	err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria,
+					match_value);
 
 add_eth_addr_rule_out:
 	kvfree(match_criteria);
-	kvfree(flow_context);
+	kvfree(match_value);
+
 	return err;
 }
 
@@ -551,72 +510,77 @@ enum mlx5e_vlan_rule_type {
 	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
 };
 
-static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
-			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
+static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+				 enum mlx5e_vlan_rule_type rule_type,
+				 u16 vid, u32 *mc, u32 *mv)
 {
+	struct mlx5_flow_table *ft = priv->fts.vlan.t;
+	struct mlx5_flow_destination dest;
 	u8 match_criteria_enable = 0;
-	u32 *flow_context;
-	void *match_value;
-	void *dest;
-	u32 *match_criteria;
-	u32 *ft_ix;
-	int err;
+	struct mlx5_flow_rule **rule_p;
+	int err = 0;
 
-	flow_context   = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
-				      MLX5_ST_SZ_BYTES(dest_format_struct));
-	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-	if (!flow_context || !match_criteria) {
-		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-		err = -ENOMEM;
-		goto add_vlan_rule_out;
-	}
-	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
-	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
-
-	MLX5_SET(flow_context, flow_context, action,
-		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
-	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
-	MLX5_SET(dest_format_struct, dest, destination_type,
-		 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
-	MLX5_SET(dest_format_struct, dest, destination_id,
-		 mlx5_get_flow_table_id(priv->ft.main));
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest.ft = priv->fts.main.t;
 
 	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
-			 outer_headers.vlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
 
 	switch (rule_type) {
 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
-		ft_ix = &priv->vlan.untagged_rule_ft_ix;
+		rule_p = &priv->vlan.untagged_rule;
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
-		ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
-		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
-			 1);
+		rule_p = &priv->vlan.any_vlan_rule;
+		MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
 		break;
 	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
-		err = mlx5e_vport_context_update_vlans(priv);
-		if (err)
-			goto add_vlan_rule_out;
-
-		ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
-		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
-			 1);
-		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
-				 outer_headers.first_vid);
-		MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
-			 vid);
+		rule_p = &priv->vlan.active_vlans_rule[vid];
+		MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
+		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
+		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
 		break;
 	}
 
-	err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
-					match_criteria, flow_context, ft_ix);
-	if (err)
-		netdev_err(priv->netdev, "%s: failed\n", __func__);
+	*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+				     MLX5_FS_DEFAULT_FLOW_TAG,
+				     &dest);
+
+	if (IS_ERR(*rule_p)) {
+		err = PTR_ERR(*rule_p);
+		*rule_p = NULL;
+		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+	}
+
+	return err;
+}
+
+static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+	u32 *match_criteria;
+	u32 *match_value;
+	int err = 0;
+
+	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+	if (!match_value || !match_criteria) {
+		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+		err = -ENOMEM;
+		goto add_vlan_rule_out;
+	}
+
+	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
+		mlx5e_vport_context_update_vlans(priv);
+
+	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria,
+				    match_value);
 
 add_vlan_rule_out:
 	kvfree(match_criteria);
-	kvfree(flow_context);
+	kvfree(match_value);
+
 	return err;
 }
 
@@ -625,16 +589,23 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
 {
 	switch (rule_type) {
 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
-		mlx5_del_flow_table_entry(priv->ft.vlan,
-					  priv->vlan.untagged_rule_ft_ix);
+		if (priv->vlan.untagged_rule) {
+			mlx5_del_flow_rule(priv->vlan.untagged_rule);
+			priv->vlan.untagged_rule = NULL;
+		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
-		mlx5_del_flow_table_entry(priv->ft.vlan,
-					  priv->vlan.any_vlan_rule_ft_ix);
+		if (priv->vlan.any_vlan_rule) {
+			mlx5_del_flow_rule(priv->vlan.any_vlan_rule);
+			priv->vlan.any_vlan_rule = NULL;
+		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
-		mlx5_del_flow_table_entry(priv->ft.vlan,
-					  priv->vlan.active_vlans_ft_ix[vid]);
+		mlx5e_vport_context_update_vlans(priv);
+		if (priv->vlan.active_vlans_rule[vid]) {
+			mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]);
+			priv->vlan.active_vlans_rule[vid] = NULL;
+		}
 		mlx5e_vport_context_update_vlans(priv);
 		break;
 	}
@@ -889,151 +860,358 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
 	mlx5e_vport_context_update(priv);
 }
 
+static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
+{
+	int i;
+
+	for (i = ft->num_groups - 1; i >= 0; i--) {
+		if (!IS_ERR_OR_NULL(ft->g[i]))
+			mlx5_destroy_flow_group(ft->g[i]);
+		ft->g[i] = NULL;
+	}
+	ft->num_groups = 0;
+}
+
 void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
 {
 	ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
 }
 
-static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+#define MLX5E_MAIN_GROUP0_SIZE	BIT(3)
+#define MLX5E_MAIN_GROUP1_SIZE	BIT(1)
+#define MLX5E_MAIN_GROUP2_SIZE	BIT(0)
+#define MLX5E_MAIN_GROUP3_SIZE	BIT(14)
+#define MLX5E_MAIN_GROUP4_SIZE	BIT(13)
+#define MLX5E_MAIN_GROUP5_SIZE	BIT(11)
+#define MLX5E_MAIN_GROUP6_SIZE	BIT(2)
+#define MLX5E_MAIN_GROUP7_SIZE	BIT(1)
+#define MLX5E_MAIN_GROUP8_SIZE	BIT(0)
+#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
+				 MLX5E_MAIN_GROUP1_SIZE +\
+				 MLX5E_MAIN_GROUP2_SIZE +\
+				 MLX5E_MAIN_GROUP3_SIZE +\
+				 MLX5E_MAIN_GROUP4_SIZE +\
+				 MLX5E_MAIN_GROUP5_SIZE +\
+				 MLX5E_MAIN_GROUP6_SIZE +\
+				 MLX5E_MAIN_GROUP7_SIZE +\
+				 MLX5E_MAIN_GROUP8_SIZE)
+
+static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in,
+				      int inlen)
 {
-	struct mlx5_flow_table_group *g;
-	u8 *dmac;
+	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
+				match_criteria.outer_headers.dmac_47_16);
+	int err;
+	int ix = 0;
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_MAIN_GROUP0_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_MAIN_GROUP1_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_MAIN_GROUP2_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+	eth_broadcast_addr(dmac);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_MAIN_GROUP3_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+	eth_broadcast_addr(dmac);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_MAIN_GROUP4_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	eth_broadcast_addr(dmac);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_MAIN_GROUP5_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+	dmac[0] = 0x01;
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_MAIN_GROUP6_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+	dmac[0] = 0x01;
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_MAIN_GROUP7_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	dmac[0] = 0x01;
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_MAIN_GROUP8_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	return 0;
+
+err_destroy_groups:
+	err = PTR_ERR(ft->g[ft->num_groups]);
+	ft->g[ft->num_groups] = NULL;
+	mlx5e_destroy_groups(ft);
+
+	return err;
+}
 
-	g = kcalloc(9, sizeof(*g), GFP_KERNEL);
-	if (!g)
+static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
+{
+	u32 *in;
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	int err;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in)
 		return -ENOMEM;
 
-	g[0].log_sz = 3;
-	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
-			 outer_headers.ethertype);
-	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
-			 outer_headers.ip_protocol);
-
-	g[1].log_sz = 1;
-	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
-			 outer_headers.ethertype);
-
-	g[2].log_sz = 0;
-
-	g[3].log_sz = 14;
-	g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
-			    outer_headers.dmac_47_16);
-	memset(dmac, 0xff, ETH_ALEN);
-	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
-			 outer_headers.ethertype);
-	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
-			 outer_headers.ip_protocol);
-
-	g[4].log_sz = 13;
-	g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
-			    outer_headers.dmac_47_16);
-	memset(dmac, 0xff, ETH_ALEN);
-	MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
-			 outer_headers.ethertype);
-
-	g[5].log_sz = 11;
-	g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
-			    outer_headers.dmac_47_16);
-	memset(dmac, 0xff, ETH_ALEN);
-
-	g[6].log_sz = 2;
-	g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
-			    outer_headers.dmac_47_16);
-	dmac[0] = 0x01;
-	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
-			 outer_headers.ethertype);
-	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
-			 outer_headers.ip_protocol);
-
-	g[7].log_sz = 1;
-	g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
-			    outer_headers.dmac_47_16);
-	dmac[0] = 0x01;
-	MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
-			 outer_headers.ethertype);
+	err = __mlx5e_create_main_groups(ft, in, inlen);
 
-	g[8].log_sz = 0;
-	g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
-			    outer_headers.dmac_47_16);
-	dmac[0] = 0x01;
-	priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
-					       MLX5_FLOW_TABLE_TYPE_NIC_RCV,
-					       9, g);
-	kfree(g);
+	kvfree(in);
+	return err;
+}
 
-	return priv->ft.main ? 0 : -ENOMEM;
+static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+{
+	struct mlx5e_flow_table *ft = &priv->fts.main;
+	int err;
+
+	ft->num_groups = 0;
+	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_MAIN_TABLE_SIZE);
+
+	if (IS_ERR(ft->t)) {
+		err = PTR_ERR(ft->t);
+		ft->t = NULL;
+		return err;
+	}
+	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+	if (!ft->g) {
+		err = -ENOMEM;
+		goto err_destroy_main_flow_table;
+	}
+
+	err = mlx5e_create_main_groups(ft);
+	if (err)
+		goto err_free_g;
+	return 0;
+
+err_free_g:
+	kfree(ft->g);
+
+err_destroy_main_flow_table:
+	mlx5_destroy_flow_table(ft->t);
+	ft->t = NULL;
+
+	return err;
+}
+
+static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
+{
+	mlx5e_destroy_groups(ft);
+	kfree(ft->g);
+	mlx5_destroy_flow_table(ft->t);
+	ft->t = NULL;
 }
 
 static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
 {
-	mlx5_destroy_flow_table(priv->ft.main);
+	mlx5e_destroy_flow_table(&priv->fts.main);
 }
 
-static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+#define MLX5E_NUM_VLAN_GROUPS	2
+#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
+#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
+#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
+				 MLX5E_VLAN_GROUP1_SIZE)
+
+static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in,
+				      int inlen)
+{
+	int err;
+	int ix = 0;
+	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_VLAN_GROUP0_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_VLAN_GROUP1_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	return 0;
+
+err_destroy_groups:
+	err = PTR_ERR(ft->g[ft->num_groups]);
+	ft->g[ft->num_groups] = NULL;
+	mlx5e_destroy_groups(ft);
+
+	return err;
+}
+
+static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
 {
-	struct mlx5_flow_table_group *g;
+	u32 *in;
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	int err;
 
-	g = kcalloc(2, sizeof(*g), GFP_KERNEL);
-	if (!g)
+	in = mlx5_vzalloc(inlen);
+	if (!in)
 		return -ENOMEM;
 
-	g[0].log_sz = 12;
-	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
-			 outer_headers.vlan_tag);
-	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
-			 outer_headers.first_vid);
-
-	/* untagged + any vlan id */
-	g[1].log_sz = 1;
-	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
-			 outer_headers.vlan_tag);
-
-	priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
-					       MLX5_FLOW_TABLE_TYPE_NIC_RCV,
-					       2, g);
-
-	kfree(g);
-	return priv->ft.vlan ? 0 : -ENOMEM;
+	err = __mlx5e_create_vlan_groups(ft, in, inlen);
+
+	kvfree(in);
+	return err;
+}
+
+static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+{
+	struct mlx5e_flow_table *ft = &priv->fts.vlan;
+	int err;
+
+	ft->num_groups = 0;
+	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_VLAN_TABLE_SIZE);
+
+	if (IS_ERR(ft->t)) {
+		err = PTR_ERR(ft->t);
+		ft->t = NULL;
+		return err;
+	}
+	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+	if (!ft->g) {
+		err = -ENOMEM;
+		goto err_destroy_vlan_flow_table;
+	}
+
+	err = mlx5e_create_vlan_groups(ft);
+	if (err)
+		goto err_free_g;
+
+	return 0;
+
+err_free_g:
+	kfree(ft->g);
+
+err_destroy_vlan_flow_table:
+	mlx5_destroy_flow_table(ft->t);
+	ft->t = NULL;
+
+	return err;
 }
 
 static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
 {
-	mlx5_destroy_flow_table(priv->ft.vlan);
+	mlx5e_destroy_flow_table(&priv->fts.vlan);
 }
 
 int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
 {
 	int err;
 
-	err = mlx5e_create_main_flow_table(priv);
+	priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
+					       MLX5_FLOW_NAMESPACE_KERNEL);
+
+	if (!priv->fts.ns)
+		return -EINVAL;
+
+	err = mlx5e_create_vlan_flow_table(priv);
 	if (err)
 		return err;
 
-	err = mlx5e_create_vlan_flow_table(priv);
+	err = mlx5e_create_main_flow_table(priv);
 	if (err)
-		goto err_destroy_main_flow_table;
+		goto err_destroy_vlan_flow_table;
 
 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
 	if (err)
-		goto err_destroy_vlan_flow_table;
+		goto err_destroy_main_flow_table;
 
 	return 0;
 
-err_destroy_vlan_flow_table:
-	mlx5e_destroy_vlan_flow_table(priv);
-
 err_destroy_main_flow_table:
 	mlx5e_destroy_main_flow_table(priv);
+err_destroy_vlan_flow_table:
+	mlx5e_destroy_vlan_flow_table(priv);
 
 	return err;
 }
@@ -1041,6 +1219,6 @@ err_destroy_main_flow_table:
 void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
 {
 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-	mlx5e_destroy_vlan_flow_table(priv);
 	mlx5e_destroy_main_flow_table(priv);
+	mlx5e_destroy_vlan_flow_table(priv);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d67058a..b15bcaa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -30,7 +30,7 @@
  * SOFTWARE.
  */
 
-#include <linux/mlx5/flow_table.h>
+#include <linux/mlx5/fs.h>
 #include "en.h"
 #include "eswitch.h"
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index d8939e5..bc3d9f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -34,7 +34,7 @@
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/mlx5_ifc.h>
 #include <linux/mlx5/vport.h>
-#include <linux/mlx5/flow_table.h>
+#include <linux/mlx5/fs.h>
 #include "mlx5_core.h"
 #include "eswitch.h"
 
@@ -321,220 +321,6 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
 	free_l2_table_index(l2_table, index);
 }
 
-/* E-Switch FDB flow steering */
-struct dest_node {
-	struct list_head list;
-	struct mlx5_flow_destination dest;
-};
-
-static int _mlx5_flow_rule_apply(struct mlx5_flow_rule *fr)
-{
-	bool was_valid = fr->valid;
-	struct dest_node *dest_n;
-	u32 dest_list_size = 0;
-	void *in_match_value;
-	u32 *flow_context;
-	u32 flow_index;
-	int err;
-	int i;
-
-	if (list_empty(&fr->dest_list)) {
-		if (fr->valid)
-			mlx5_del_flow_table_entry(fr->ft, fr->fi);
-		fr->valid = false;
-		return 0;
-	}
-
-	list_for_each_entry(dest_n, &fr->dest_list, list)
-		dest_list_size++;
-
-	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
-				    MLX5_ST_SZ_BYTES(dest_format_struct) *
-				    dest_list_size);
-	if (!flow_context)
-		return -ENOMEM;
-
-	MLX5_SET(flow_context, flow_context, flow_tag, fr->flow_tag);
-	MLX5_SET(flow_context, flow_context, action, fr->action);
-	MLX5_SET(flow_context, flow_context, destination_list_size,
-		 dest_list_size);
-
-	i = 0;
-	list_for_each_entry(dest_n, &fr->dest_list, list) {
-		void *dest_addr = MLX5_ADDR_OF(flow_context, flow_context,
-					       destination[i++]);
-
-		MLX5_SET(dest_format_struct, dest_addr, destination_type,
-			 dest_n->dest.type);
-		MLX5_SET(dest_format_struct, dest_addr, destination_id,
-			 dest_n->dest.vport_num);
-	}
-
-	in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
-	memcpy(in_match_value, fr->match_value, MLX5_ST_SZ_BYTES(fte_match_param));
-
-	err = mlx5_add_flow_table_entry(fr->ft, fr->match_criteria_enable,
-					fr->match_criteria, flow_context,
-					&flow_index);
-	if (!err) {
-		if (was_valid)
-			mlx5_del_flow_table_entry(fr->ft, fr->fi);
-		fr->fi = flow_index;
-		fr->valid = true;
-	}
-	kfree(flow_context);
-	return err;
-}
-
-static int mlx5_flow_rule_add_dest(struct mlx5_flow_rule *fr,
-				   struct mlx5_flow_destination *new_dest)
-{
-	struct dest_node *dest_n;
-	int err;
-
-	dest_n = kzalloc(sizeof(*dest_n), GFP_KERNEL);
-	if (!dest_n)
-		return -ENOMEM;
-
-	memcpy(&dest_n->dest, new_dest, sizeof(dest_n->dest));
-	mutex_lock(&fr->mutex);
-	list_add(&dest_n->list, &fr->dest_list);
-	err = _mlx5_flow_rule_apply(fr);
-	if (err) {
-		list_del(&dest_n->list);
-		kfree(dest_n);
-	}
-	mutex_unlock(&fr->mutex);
-	return err;
-}
-
-static int mlx5_flow_rule_del_dest(struct mlx5_flow_rule *fr,
-				   struct mlx5_flow_destination *dest)
-{
-	struct dest_node *dest_n;
-	struct dest_node *n;
-	int err;
-
-	mutex_lock(&fr->mutex);
-	list_for_each_entry_safe(dest_n, n, &fr->dest_list, list) {
-		if (dest->vport_num == dest_n->dest.vport_num)
-			goto found;
-	}
-	mutex_unlock(&fr->mutex);
-	return -ENOENT;
-
-found:
-	list_del(&dest_n->list);
-	err = _mlx5_flow_rule_apply(fr);
-	mutex_unlock(&fr->mutex);
-	kfree(dest_n);
-
-	return err;
-}
-
-static struct mlx5_flow_rule *find_fr(struct mlx5_eswitch *esw,
-				      u8 match_criteria_enable,
-				      u32 *match_value)
-{
-	struct hlist_head *hash = esw->mc_table;
-	struct esw_mc_addr *esw_mc;
-	u8 *dmac_v;
-
-	dmac_v = MLX5_ADDR_OF(fte_match_param, match_value,
-			      outer_headers.dmac_47_16);
-
-	/* UNICAST FULL MATCH */
-	if (!is_multicast_ether_addr(dmac_v))
-		return NULL;
-
-	/* MULTICAST FULL MATCH */
-	esw_mc = l2addr_hash_find(hash, dmac_v, struct esw_mc_addr);
-
-	return esw_mc ? esw_mc->uplink_rule : NULL;
-}
-
-static struct mlx5_flow_rule *alloc_fr(void *ft,
-				       u8 match_criteria_enable,
-				       u32 *match_criteria,
-				       u32 *match_value,
-				       u32 action,
-				       u32 flow_tag)
-{
-	struct mlx5_flow_rule *fr = kzalloc(sizeof(*fr), GFP_KERNEL);
-
-	if (!fr)
-		return NULL;
-
-	fr->match_criteria = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	fr->match_value = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	if (!fr->match_criteria || !fr->match_value) {
-		kfree(fr->match_criteria);
-		kfree(fr->match_value);
-		kfree(fr);
-		return NULL;
-	}
-
-	memcpy(fr->match_criteria, match_criteria, MLX5_ST_SZ_BYTES(fte_match_param));
-	memcpy(fr->match_value, match_value, MLX5_ST_SZ_BYTES(fte_match_param));
-	fr->match_criteria_enable = match_criteria_enable;
-	fr->flow_tag = flow_tag;
-	fr->action = action;
-
-	mutex_init(&fr->mutex);
-	INIT_LIST_HEAD(&fr->dest_list);
-	atomic_set(&fr->refcount, 0);
-	fr->ft = ft;
-	return fr;
-}
-
-static void deref_fr(struct mlx5_flow_rule *fr)
-{
-	if (!atomic_dec_and_test(&fr->refcount))
-		return;
-
-	kfree(fr->match_criteria);
-	kfree(fr->match_value);
-	kfree(fr);
-}
-
-static struct mlx5_flow_rule *
-mlx5_add_flow_rule(struct mlx5_eswitch *esw,
-		   u8 match_criteria_enable,
-		   u32 *match_criteria,
-		   u32 *match_value,
-		   u32 action,
-		   u32 flow_tag,
-		   struct mlx5_flow_destination *dest)
-{
-	struct mlx5_flow_rule *fr;
-	int err;
-
-	fr = find_fr(esw, match_criteria_enable, match_value);
-	fr = fr ? fr : alloc_fr(esw->fdb_table.fdb, match_criteria_enable, match_criteria,
-				match_value, action, flow_tag);
-	if (!fr)
-		return NULL;
-
-	atomic_inc(&fr->refcount);
-
-	err = mlx5_flow_rule_add_dest(fr, dest);
-	if (err) {
-		deref_fr(fr);
-		return NULL;
-	}
-
-	return fr;
-}
-
-static void mlx5_del_flow_rule(struct mlx5_flow_rule *fr, u32 vport)
-{
-	struct mlx5_flow_destination dest;
-
-	dest.vport_num = vport;
-	mlx5_flow_rule_del_dest(fr, &dest);
-	deref_fr(fr);
-}
-
 /* E-Switch FDB */
 static struct mlx5_flow_rule *
 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
@@ -569,7 +355,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
 		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
 		  dmac_v, dmac_c, vport);
 	flow_rule =
-		mlx5_add_flow_rule(esw,
+		mlx5_add_flow_rule(esw->fdb_table.fdb,
 				   match_header,
 				   match_c,
 				   match_v,
@@ -589,33 +375,61 @@ out:
 
 static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
 {
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 	struct mlx5_core_dev *dev = esw->dev;
-	struct mlx5_flow_table_group g;
+	struct mlx5_flow_namespace *root_ns;
 	struct mlx5_flow_table *fdb;
+	struct mlx5_flow_group *g;
+	void *match_criteria;
+	int table_size;
+	u32 *flow_group_in;
 	u8 *dmac;
+	int err = 0;
 
 	esw_debug(dev, "Create FDB log_max_size(%d)\n",
 		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
 
-	memset(&g, 0, sizeof(g));
-	/* UC MC Full match rules*/
-	g.log_sz = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
-	g.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	dmac = MLX5_ADDR_OF(fte_match_param, g.match_criteria,
-			    outer_headers.dmac_47_16);
-	/* Match criteria mask */
-	memset(dmac, 0xff, 6);
+	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+	if (!root_ns) {
+		esw_warn(dev, "Failed to get FDB flow namespace\n");
+		return -ENOMEM;
+	}
 
-	fdb = mlx5_create_flow_table(dev, 0,
-				     MLX5_FLOW_TABLE_TYPE_ESWITCH,
-				     1, &g);
-	if (fdb)
-		esw_debug(dev, "ESW: FDB Table created fdb->id %d\n", mlx5_get_flow_table_id(fdb));
-	else
-		esw_warn(dev, "ESW: Failed to create FDB Table\n");
+	flow_group_in = mlx5_vzalloc(inlen);
+	if (!flow_group_in)
+		return -ENOMEM;
+	memset(flow_group_in, 0, inlen);
+
+	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+	fdb = mlx5_create_flow_table(root_ns, 0, table_size);
+	if (IS_ERR_OR_NULL(fdb)) {
+		err = PTR_ERR(fdb);
+		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
+		goto out;
+	}
 
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+		 MLX5_MATCH_OUTER_HEADERS);
+	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+	eth_broadcast_addr(dmac);
+
+	g = mlx5_create_flow_group(fdb, flow_group_in);
+	if (IS_ERR_OR_NULL(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
+		goto out;
+	}
+
+	esw->fdb_table.addr_grp = g;
 	esw->fdb_table.fdb = fdb;
-	return fdb ? 0 : -ENOMEM;
+out:
+	kfree(flow_group_in);
+	if (err && !IS_ERR_OR_NULL(fdb))
+		mlx5_destroy_flow_table(fdb);
+	return err;
 }
 
 static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
@@ -623,10 +437,11 @@ static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
 	if (!esw->fdb_table.fdb)
 		return;
 
-	esw_debug(esw->dev, "Destroy FDB Table fdb(%d)\n",
-		  mlx5_get_flow_table_id(esw->fdb_table.fdb));
+	esw_debug(esw->dev, "Destroy FDB Table\n");
+	mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
 	mlx5_destroy_flow_table(esw->fdb_table.fdb);
 	esw->fdb_table.fdb = NULL;
+	esw->fdb_table.addr_grp = NULL;
 }
 
 /* E-Switch vport UC/MC lists management */
@@ -689,7 +504,7 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
 	del_l2_table_entry(esw->dev, esw_uc->table_index);
 
 	if (vaddr->flow_rule)
-		mlx5_del_flow_rule(vaddr->flow_rule, vport);
+		mlx5_del_flow_rule(vaddr->flow_rule);
 	vaddr->flow_rule = NULL;
 
 	l2addr_hash_del(esw_uc);
@@ -750,14 +565,14 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
 		  esw_mc->uplink_rule);
 
 	if (vaddr->flow_rule)
-		mlx5_del_flow_rule(vaddr->flow_rule, vport);
+		mlx5_del_flow_rule(vaddr->flow_rule);
 	vaddr->flow_rule = NULL;
 
 	if (--esw_mc->refcnt)
 		return 0;
 
 	if (esw_mc->uplink_rule)
-		mlx5_del_flow_rule(esw_mc->uplink_rule, UPLINK_VPORT);
+		mlx5_del_flow_rule(esw_mc->uplink_rule);
 
 	l2addr_hash_del(esw_mc);
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 02ff3ea..3416a42 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -88,20 +88,6 @@ struct l2addr_node {
 	kfree(ptr);                                         \
 })
 
-struct mlx5_flow_rule {
-	void             *ft;
-	u32              fi;
-	u8               match_criteria_enable;
-	u32              *match_criteria;
-	u32              *match_value;
-	u32              action;
-	u32              flow_tag;
-	bool             valid;
-	atomic_t         refcount;
-	struct mutex     mutex; /* protect flow rule updates */
-	struct list_head dest_list;
-};
-
 struct mlx5_vport {
 	struct mlx5_core_dev    *dev;
 	int                     vport;
@@ -126,6 +112,7 @@ struct mlx5_l2_table {
 
 struct mlx5_eswitch_fdb {
 	void *fdb;
+	struct mlx5_flow_group *addr_grp;
 };
 
 struct mlx5_eswitch {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
deleted file mode 100644
index ca90b9b..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/flow_table.h>
-#include "mlx5_core.h"
-
-struct mlx5_ftg {
-	struct mlx5_flow_table_group    g;
-	u32				id;
-	u32				start_ix;
-};
-
-struct mlx5_flow_table {
-	struct mlx5_core_dev	*dev;
-	u8			level;
-	u8			type;
-	u32			id;
-	struct mutex		mutex; /* sync bitmap alloc */
-	u16			num_groups;
-	struct mlx5_ftg		*group;
-	unsigned long		*bitmap;
-	u32			size;
-};
-
-static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
-				   u32 flow_index, void *flow_context)
-{
-	u32 out[MLX5_ST_SZ_DW(set_fte_out)];
-	u32 *in;
-	void *in_flow_context;
-	int fcdls =
-		MLX5_GET(flow_context, flow_context, destination_list_size) *
-		MLX5_ST_SZ_BYTES(dest_format_struct);
-	int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
-	int err;
-
-	in = mlx5_vzalloc(inlen);
-	if (!in) {
-		mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
-		return -ENOMEM;
-	}
-
-	MLX5_SET(set_fte_in, in, table_type, ft->type);
-	MLX5_SET(set_fte_in, in, table_id,   ft->id);
-	MLX5_SET(set_fte_in, in, flow_index, flow_index);
-	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
-
-	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
-	memcpy(in_flow_context, flow_context,
-	       MLX5_ST_SZ_BYTES(flow_context) + fcdls);
-
-	MLX5_SET(flow_context, in_flow_context, group_id,
-		 ft->group[group_ix].id);
-
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
-					 sizeof(out));
-	kvfree(in);
-
-	return err;
-}
-
-static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
-{
-	u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
-	u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
-
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
-
-#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
-	MLX5_SET_DFTEI(in, table_type, ft->type);
-	MLX5_SET_DFTEI(in, table_id,   ft->id);
-	MLX5_SET_DFTEI(in, flow_index, flow_index);
-	MLX5_SET_DFTEI(in, opcode,     MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
-
-	mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
-}
-
-static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
-{
-	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
-	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
-
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
-
-#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
-	MLX5_SET_DFGI(in, table_type, ft->type);
-	MLX5_SET_DFGI(in, table_id,   ft->id);
-	MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
-	MLX5_SET_DFGI(in, group_id, ft->group[i].id);
-	mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
-{
-	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
-	u32 *in;
-	void *in_match_criteria;
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct mlx5_flow_table_group *g = &ft->group[i].g;
-	u32 start_ix = ft->group[i].start_ix;
-	u32 end_ix = start_ix + (1 << g->log_sz) - 1;
-	int err;
-
-	in = mlx5_vzalloc(inlen);
-	if (!in) {
-		mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
-		return -ENOMEM;
-	}
-	in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
-					 match_criteria);
-
-	memset(out, 0, sizeof(out));
-
-#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
-	MLX5_SET_CFGI(in, table_type,            ft->type);
-	MLX5_SET_CFGI(in, table_id,              ft->id);
-	MLX5_SET_CFGI(in, opcode,                MLX5_CMD_OP_CREATE_FLOW_GROUP);
-	MLX5_SET_CFGI(in, start_flow_index,      start_ix);
-	MLX5_SET_CFGI(in, end_flow_index,        end_ix);
-	MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
-
-	memcpy(in_match_criteria, g->match_criteria,
-	       MLX5_ST_SZ_BYTES(fte_match_param));
-
-	err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
-					 sizeof(out));
-	if (!err)
-		ft->group[i].id = MLX5_GET(create_flow_group_out, out,
-					   group_id);
-
-	kvfree(in);
-
-	return err;
-}
-
-static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
-{
-	int i;
-
-	for (i = 0; i < ft->num_groups; i++)
-		mlx5_destroy_flow_group_cmd(ft, i);
-}
-
-static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
-{
-	int err;
-	int i;
-
-	for (i = 0; i < ft->num_groups; i++) {
-		err = mlx5_create_flow_group_cmd(ft, i);
-		if (err)
-			goto err_destroy_flow_table_groups;
-	}
-
-	return 0;
-
-err_destroy_flow_table_groups:
-	for (i--; i >= 0; i--)
-		mlx5_destroy_flow_group_cmd(ft, i);
-
-	return err;
-}
-
-static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
-{
-	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
-	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
-	int err;
-
-	memset(in, 0, sizeof(in));
-
-	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
-	MLX5_SET(create_flow_table_in, in, level,      ft->level);
-	MLX5_SET(create_flow_table_in, in, log_size,   order_base_2(ft->size));
-
-	MLX5_SET(create_flow_table_in, in, opcode,
-		 MLX5_CMD_OP_CREATE_FLOW_TABLE);
-
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
-					 sizeof(out));
-	if (err)
-		return err;
-
-	ft->id = MLX5_GET(create_flow_table_out, out, table_id);
-
-	return 0;
-}
-
-static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
-{
-	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
-	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
-
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
-
-#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
-	MLX5_SET_DFTI(in, table_type, ft->type);
-	MLX5_SET_DFTI(in, table_id,   ft->id);
-	MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
-
-	mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
-			   u32 *match_criteria, int *group_ix)
-{
-	void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
-				      outer_headers);
-	void *mc_misc  = MLX5_ADDR_OF(fte_match_param, match_criteria,
-				      misc_parameters);
-	void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
-				      inner_headers);
-	int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
-	int mc_misc_sz  = MLX5_ST_SZ_BYTES(fte_match_set_misc);
-	int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
-	int i;
-
-	for (i = 0; i < ft->num_groups; i++) {
-		struct mlx5_flow_table_group *g = &ft->group[i].g;
-		void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
-					       g->match_criteria,
-					       outer_headers);
-		void *gmc_misc  = MLX5_ADDR_OF(fte_match_param,
-					       g->match_criteria,
-					       misc_parameters);
-		void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
-					       g->match_criteria,
-					       inner_headers);
-
-		if (g->match_criteria_enable != match_criteria_enable)
-			continue;
-
-		if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
-			if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
-				continue;
-
-		if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
-			if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
-				continue;
-
-		if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
-			if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
-				continue;
-
-		*group_ix = i;
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
-{
-	struct mlx5_ftg *g = &ft->group[group_ix];
-	int err = 0;
-
-	mutex_lock(&ft->mutex);
-
-	*ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
-	if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
-		err = -ENOSPC;
-	else
-		__set_bit(*ix, ft->bitmap);
-
-	mutex_unlock(&ft->mutex);
-
-	return err;
-}
-
-static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
-{
-	__clear_bit(ix, ft->bitmap);
-}
-
-int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
-			      void *match_criteria, void *flow_context,
-			      u32 *flow_index)
-{
-	struct mlx5_flow_table *ft = flow_table;
-	int group_ix;
-	int err;
-
-	err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
-			      &group_ix);
-	if (err) {
-		mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
-		return err;
-	}
-
-	err = alloc_flow_index(ft, group_ix, flow_index);
-	if (err) {
-		mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
-		return err;
-	}
-
-	return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
-}
-EXPORT_SYMBOL(mlx5_add_flow_table_entry);
-
-void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
-{
-	struct mlx5_flow_table *ft = flow_table;
-
-	mlx5_del_flow_entry_cmd(ft, flow_index);
-	mlx5_free_flow_index(ft, flow_index);
-}
-EXPORT_SYMBOL(mlx5_del_flow_table_entry);
-
-void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
-			     u16 num_groups,
-			     struct mlx5_flow_table_group *group)
-{
-	struct mlx5_flow_table *ft;
-	u32 start_ix = 0;
-	u32 ft_size = 0;
-	void *gr;
-	void *bm;
-	int err;
-	int i;
-
-	for (i = 0; i < num_groups; i++)
-		ft_size += (1 << group[i].log_sz);
-
-	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
-	gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
-	bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL);
-	if (!ft || !gr || !bm)
-		goto err_free_ft;
-
-	ft->group	= gr;
-	ft->bitmap	= bm;
-	ft->num_groups	= num_groups;
-	ft->level	= level;
-	ft->type	= table_type;
-	ft->size	= ft_size;
-	ft->dev		= dev;
-	mutex_init(&ft->mutex);
-
-	for (i = 0; i < ft->num_groups; i++) {
-		memcpy(&ft->group[i].g, &group[i], sizeof(*group));
-		ft->group[i].start_ix = start_ix;
-		start_ix += 1 << group[i].log_sz;
-	}
-
-	err = mlx5_create_flow_table_cmd(ft);
-	if (err)
-		goto err_free_ft;
-
-	err = mlx5_create_flow_table_groups(ft);
-	if (err)
-		goto err_destroy_flow_table_cmd;
-
-	return ft;
-
-err_destroy_flow_table_cmd:
-	mlx5_destroy_flow_table_cmd(ft);
-
-err_free_ft:
-	mlx5_core_warn(dev, "failed to alloc flow table\n");
-	kfree(bm);
-	kfree(gr);
-	kfree(ft);
-
-	return NULL;
-}
-EXPORT_SYMBOL(mlx5_create_flow_table);
-
-void mlx5_destroy_flow_table(void *flow_table)
-{
-	struct mlx5_flow_table *ft = flow_table;
-
-	mlx5_destroy_flow_table_groups(ft);
-	mlx5_destroy_flow_table_cmd(ft);
-	kfree(ft->bitmap);
-	kfree(ft->group);
-	kfree(ft);
-}
-EXPORT_SYMBOL(mlx5_destroy_flow_table);
-
-u32 mlx5_get_flow_table_id(void *flow_table)
-{
-	struct mlx5_flow_table *ft = flow_table;
-
-	return ft->id;
-}
-EXPORT_SYMBOL(mlx5_get_flow_table_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 4a83632..629ccbe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -436,9 +436,9 @@ static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
 	return ft;
 }
 
-static struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
-						      int prio,
-						      int max_fte)
+struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+					       int prio,
+					       int max_fte)
 {
 	struct mlx5_flow_table *ft;
 	int err;
@@ -491,8 +491,8 @@ unlock_prio:
 	return ERR_PTR(err);
 }
 
-static struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
-						      u32 *fg_in)
+struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
+					       u32 *fg_in)
 {
 	struct mlx5_flow_group *fg;
 	struct mlx5_core_dev *dev = get_dev(&ft->node);
@@ -669,7 +669,7 @@ unlock_fg:
 	return rule;
 }
 
-static struct mlx5_flow_rule *
+struct mlx5_flow_rule *
 mlx5_add_flow_rule(struct mlx5_flow_table *ft,
 		   u8 match_criteria_enable,
 		   u32 *match_criteria,
@@ -699,12 +699,12 @@ put:
 	return rule;
 }
 
-static void mlx5_del_flow_rule(struct mlx5_flow_rule *rule)
+void mlx5_del_flow_rule(struct mlx5_flow_rule *rule)
 {
 	tree_remove_node(&rule->node);
 }
 
-static int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
+int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
 {
 	if (tree_remove_node(&ft->node))
 		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
@@ -713,15 +713,15 @@ static int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
 	return 0;
 }
 
-static void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
+void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
 {
 	if (tree_remove_node(&fg->node))
 		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
 			       fg->id);
 }
 
-static struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
-							   enum mlx5_flow_namespace_type type)
+struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+						    enum mlx5_flow_namespace_type type)
 {
 	struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
 	int prio;
@@ -867,7 +867,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev
 	struct mlx5_flow_root_namespace *root_ns;
 	struct mlx5_flow_namespace *ns;
 
-	/* create the root namespace */
+	/* Create the root namespace */
 	root_ns = mlx5_vzalloc(sizeof(*root_ns));
 	if (!root_ns)
 		return NULL;
@@ -1010,7 +1010,7 @@ static int init_fdb_root_ns(struct mlx5_core_dev *dev)
 	if (!dev->priv.fdb_root_ns)
 		return -ENOMEM;
 
-	/* create 1 prio*/
+	/* Create single prio */
 	prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1, 0);
 	if (IS_ERR(prio)) {
 		cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c6de324..789882b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -49,6 +49,7 @@
 #include <linux/delay.h>
 #include <linux/mlx5/mlx5_ifc.h>
 #include "mlx5_core.h"
+#include "fs_core.h"
 #ifdef CONFIG_MLX5_CORE_EN
 #include "eswitch.h"
 #endif
@@ -1055,6 +1056,11 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	mlx5_init_srq_table(dev);
 	mlx5_init_mr_table(dev);
 
+	err = mlx5_init_fs(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to init flow steering\n");
+		goto err_fs;
+	}
 #ifdef CONFIG_MLX5_CORE_EN
 	err = mlx5_eswitch_init(dev);
 	if (err) {
@@ -1093,6 +1099,8 @@ err_sriov:
 	mlx5_eswitch_cleanup(dev->priv.eswitch);
 #endif
 err_reg_dev:
+	mlx5_cleanup_fs(dev);
+err_fs:
 	mlx5_cleanup_mr_table(dev);
 	mlx5_cleanup_srq_table(dev);
 	mlx5_cleanup_qp_table(dev);
@@ -1165,6 +1173,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	mlx5_eswitch_cleanup(dev->priv.eswitch);
 #endif
 
+	mlx5_cleanup_fs(dev);
 	mlx5_cleanup_mr_table(dev);
 	mlx5_cleanup_srq_table(dev);
 	mlx5_cleanup_qp_table(dev);
diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h
deleted file mode 100644
index 0f2a15c..0000000
--- a/include/linux/mlx5/flow_table.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_FLOW_TABLE_H
-#define MLX5_FLOW_TABLE_H
-
-#include <linux/mlx5/driver.h>
-
-struct mlx5_flow_table_group {
-	u8	log_sz;
-	u8	match_criteria_enable;
-	u32	match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
-};
-
-struct mlx5_flow_destination {
-	enum mlx5_flow_destination_type	type;
-	union {
-		u32			tir_num;
-		void			*ft;
-		u32			vport_num;
-	};
-};
-
-void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
-			     u16 num_groups,
-			     struct mlx5_flow_table_group *group);
-void mlx5_destroy_flow_table(void *flow_table);
-int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
-			      void *match_criteria, void *flow_context,
-			      u32 *flow_index);
-void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
-u32 mlx5_get_flow_table_id(void *flow_table);
-
-#endif /* MLX5_FLOW_TABLE_H */
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 16ae523..bc7ad01 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -33,6 +33,7 @@
 #ifndef _MLX5_FS_
 #define _MLX5_FS_
 
+#include <linux/mlx5/driver.h>
 #include <linux/mlx5/mlx5_ifc.h>
 
 #define MLX5_FS_DEFAULT_FLOW_TAG 0x0
@@ -43,6 +44,9 @@ enum mlx5_flow_namespace_type {
 };
 
 struct mlx5_flow_table;
+struct mlx5_flow_group;
+struct mlx5_flow_rule;
+struct mlx5_flow_namespace;
 
 struct mlx5_flow_destination {
 	enum mlx5_flow_destination_type	type;
@@ -52,4 +56,38 @@ struct mlx5_flow_destination {
 		u32			vport_num;
 	};
 };
+
+struct mlx5_flow_namespace *
+mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+			enum mlx5_flow_namespace_type type);
+
+struct mlx5_flow_table *
+mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+		       int prio,
+		       int num_flow_table_entries);
+int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
+
+/* inbox should be set with the following values:
+ * start_flow_index
+ * end_flow_index
+ * match_criteria_enable
+ * match_criteria
+ */
+struct mlx5_flow_group *
+mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
+void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+
+/* Single destination per rule.
+ * Group ID is implied by the match criteria.
+ */
+struct mlx5_flow_rule *
+mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+		   u8 match_criteria_enable,
+		   u32 *match_criteria,
+		   u32 *match_value,
+		   u32 action,
+		   u32 flow_tag,
+		   struct mlx5_flow_destination *dest);
+void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
+
 #endif
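
For reviewers who want to see the exported interface end to end, here is a
minimal, illustrative usage sketch (not part of the patch, and deliberately
simplified): it walks namespace -> table -> group -> rule -> teardown. The
function name example_steer_to_tir() is made up for the example, and
MLX5_FLOW_NAMESPACE_KERNEL, MLX5_FLOW_DESTINATION_TYPE_TIR and
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST are assumed enum values; everything else
uses only symbols visible in this series.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/fs.h>

/* Illustrative only -- not part of this patch. Create a 4-entry table in
 * the NIC RX namespace, add one catch-all group and one rule forwarding
 * everything to the given TIR, then tear it all down. Assumed values:
 * MLX5_FLOW_NAMESPACE_KERNEL, MLX5_FLOW_DESTINATION_TYPE_TIR,
 * MLX5_FLOW_CONTEXT_ACTION_FWD_DEST.
 */
static int example_steer_to_tir(struct mlx5_core_dev *dev, u32 tirn)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_destination dest;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_rule *rule;
	u32 *fg_in, *match_c, *match_v;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
	if (!ns)
		return -ENOENT;

	ft = mlx5_create_flow_table(ns, 0 /* prio */, 4 /* entries */);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	fg_in   = mlx5_vzalloc(inlen);
	match_c = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_v = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!fg_in || !match_c || !match_v) {
		err = -ENOMEM;
		goto out_table;
	}

	/* One group spanning the whole table; match_criteria_enable and
	 * match_criteria stay zero, i.e. a catch-all group.
	 */
	MLX5_SET(create_flow_group_in, fg_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, fg_in, end_flow_index, 3);
	fg = mlx5_create_flow_group(ft, fg_in);
	if (IS_ERR(fg)) {
		err = PTR_ERR(fg);
		goto out_table;
	}

	/* Single destination per rule; the group is implied by the
	 * (here empty) match criteria.
	 */
	dest.type    = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;
	rule = mlx5_add_flow_rule(ft, 0, match_c, match_v,
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				  MLX5_FS_DEFAULT_FLOW_TAG, &dest);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_group;
	}

	/* ...use the rule, then tear down in reverse order... */
	mlx5_del_flow_rule(rule);
out_group:
	mlx5_destroy_flow_group(fg);
out_table:
	mlx5_destroy_flow_table(ft);
	kvfree(match_v);
	kvfree(match_c);
	kvfree(fg_in);
	return err;
}
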
-- 
1.7.1

