Message-ID: <1572551213-9022-4-git-send-email-lariel@mellanox.com>
Date: Thu, 31 Oct 2019 19:47:52 +0000
From: Ariel Levkovich <lariel@...lanox.com>
To: "netdev@...r.kernel.org" <netdev@...r.kernel.org>
CC: Saeed Mahameed <saeedm@...lanox.com>,
"sd@...asysnail.net" <sd@...asysnail.net>,
"sbrivio@...hat.com" <sbrivio@...hat.com>,
"nikolay@...ulusnetworks.com" <nikolay@...ulusnetworks.com>,
Jiri Pirko <jiri@...lanox.com>,
"dsahern@...il.com" <dsahern@...il.com>,
"stephen@...workplumber.org" <stephen@...workplumber.org>,
Ariel Levkovich <lariel@...lanox.com>
Subject: [PATCH net-next v2 3/3] net/mlx5: Add SRIOV VGT+ support
Implement the VGT+ feature via ACL tables.
The ACL tables hold only the rules that are actually needed: the
intersection of the VLAN ID list requested by the VF and the VLAN ID
list allowed by the administrator.
Signed-off-by: Ariel Levkovich <lariel@...lanox.com>
---
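Note for reviewers (illustrative, not part of this patch): the new
NDOs are expected to be driven by the VF VLAN trunk rtnetlink plumbing
added earlier in this series, roughly as sketched here; the dev, vf and
VLAN range values are hypothetical:
	/* hypothetical caller sketch; only 802.1Q is supported,
	 * any other protocol yields -EPROTONOSUPPORT
	 */
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;
	err = ops->ndo_add_vf_vlan_trunk_range(dev, vf, 10, 20,
					       htons(ETH_P_8021Q));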
drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 30 ++
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 600 ++++++++++++++++-----
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 27 +-
.../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 8 +-
4 files changed, 533 insertions(+), 132 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7569287..9253bfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4061,6 +4061,34 @@ static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
vlan, qos);
}
+static int mlx5e_add_vf_vlan_trunk_range(struct net_device *dev, int vf,
+ u16 start_vid, u16 end_vid,
+ __be16 vlan_proto)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ return mlx5_eswitch_add_vport_trunk_range(mdev->priv.eswitch, vf + 1,
+ start_vid, end_vid);
+}
+
+static int mlx5e_del_vf_vlan_trunk_range(struct net_device *dev, int vf,
+ u16 start_vid, u16 end_vid,
+ __be16 vlan_proto)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ return mlx5_eswitch_del_vport_trunk_range(mdev->priv.eswitch, vf + 1,
+ start_vid, end_vid);
+}
+
static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -4589,6 +4617,8 @@ static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
/* SRIOV E-Switch NDOs */
.ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_vlan = mlx5e_set_vf_vlan,
+ .ndo_add_vf_vlan_trunk_range = mlx5e_add_vf_vlan_trunk_range,
+ .ndo_del_vf_vlan_trunk_range = mlx5e_del_vf_vlan_trunk_range,
.ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
.ndo_set_vf_trust = mlx5e_set_vf_trust,
.ndo_set_vf_rate = mlx5e_set_vf_rate,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 7baade9..911421e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -58,6 +58,11 @@ struct vport_addr {
bool mc_promisc;
};
+struct mlx5_acl_vlan {
+ struct mlx5_flow_handle *acl_vlan_rule;
+ struct list_head list;
+};
+
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
@@ -452,6 +457,7 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
MLX5_VPORT_MC_ADDR_CHANGE | \
+ MLX5_VPORT_VLAN_CHANGE | \
MLX5_VPORT_PROMISC_CHANGE)
static int esw_legacy_enable(struct mlx5_eswitch *esw)
@@ -793,6 +799,94 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
kfree(mac_list);
}
+static void esw_update_acl_trunk_bitmap(struct mlx5_eswitch *esw, u32 vport_num)
+{
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+
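+ /* The ACL bitmap is the intersection of the VF-requested VLANs and the admin-allowed trunk VLANs */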
+ bitmap_and(vport->acl_vlan_8021q_bitmap, vport->req_vlan_bitmap,
+ vport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID);
+}
+
+static int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, u32 vport,
+ unsigned long *vlans)
+{
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+ void *nic_vport_ctx;
+ int req_list_size;
+ int out_sz;
+ void *out;
+ int err;
+ int i;
+
+ req_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
+ out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
+ req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
+
+ memset(in, 0, sizeof(in));
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+ MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
+ MLX5_NVPRT_LIST_TYPE_VLAN);
+ MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+
+ if (vport)
+ MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto out;
+
+ nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+ nic_vport_context);
+ req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
+ allowed_list_size);
+
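+ /* The allowed-VLAN list reuses the current_uc_mac_address[] layout in the nic_vport_context, as in the existing UC/MC list query */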
+ for (i = 0; i < req_list_size; i++) {
+ void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
+ nic_vport_ctx,
+ current_uc_mac_address[i]);
+ bitmap_set(vlans, MLX5_GET(vlan_layout, vlan_addr, vlan), 1);
+ }
+out:
+ kfree(out);
+ return err;
+}
+
+static int esw_vport_egress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+
+/* Sync vport vlan list from vport context */
+static void esw_update_vport_vlan_list(struct mlx5_eswitch *esw, u32 vport_num)
+{
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+ DECLARE_BITMAP(prev_vlans_bitmap, VLAN_N_VID);
+ int err;
+
+ bitmap_copy(prev_vlans_bitmap, vport->req_vlan_bitmap, VLAN_N_VID);
+ bitmap_zero(vport->req_vlan_bitmap, VLAN_N_VID);
+
+ if (!vport->enabled)
+ return;
+
+ err = mlx5_query_nic_vport_vlans(esw->dev, vport_num, vport->req_vlan_bitmap);
+ if (err)
+ return;
+
+ bitmap_xor(prev_vlans_bitmap, prev_vlans_bitmap, vport->req_vlan_bitmap, VLAN_N_VID);
+ if (!bitmap_weight(prev_vlans_bitmap, VLAN_N_VID))
+ return;
+
+ esw_update_acl_trunk_bitmap(esw, vport_num);
+ esw_vport_egress_config(esw, vport);
+ esw_vport_ingress_config(esw, vport);
+}
+
/* Sync vport UC/MC list from vport context
* Must be called after esw_update_vport_addr_list
*/
@@ -920,6 +1014,9 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
+ if (vport->enabled_events & MLX5_VPORT_VLAN_CHANGE)
+ esw_update_vport_vlan_list(esw, vport->vport);
+
if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
esw_update_vport_rx_mode(esw, vport);
if (!IS_ERR_OR_NULL(vport->allmulti_rule))
@@ -950,18 +1047,20 @@ int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *untagged_grp = NULL;
struct mlx5_flow_group *vlan_grp = NULL;
struct mlx5_flow_group *drop_grp = NULL;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *acl;
+ /* The egress acl table contains 3 groups:
+ * 1)Allow tagged traffic with vlan_tag=vst_vlan_id/vgt+_vlan_id
+ * 2)Allow untagged traffic
+ * 3)Drop all other traffic
+ */
+ int table_size = VLAN_N_VID + 2;
void *match_criteria;
u32 *flow_group_in;
- /* The egress acl table contains 2 rules:
- * 1)Allow traffic with vlan_tag=vst_vlan_id
- * 2)Drop all other traffic.
- */
- int table_size = 2;
int err = 0;
if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
@@ -994,11 +1093,25 @@ int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ /* Create flow group for allowed untagged flow rule */
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+ untagged_grp = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(untagged_grp)) {
+ err = PTR_ERR(untagged_grp);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress untagged flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ /* Create flow group for allowed tagged flow rules */
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, VLAN_N_VID);
+
vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
if (IS_ERR(vlan_grp)) {
err = PTR_ERR(vlan_grp);
@@ -1007,9 +1120,10 @@ int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
goto out;
}
+ /* Create flow group for drop rule */
memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, VLAN_N_VID + 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, VLAN_N_VID + 1);
drop_grp = mlx5_create_flow_group(acl, flow_group_in);
if (IS_ERR(drop_grp)) {
err = PTR_ERR(drop_grp);
@@ -1021,27 +1135,45 @@ int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.acl = acl;
vport->egress.drop_grp = drop_grp;
vport->egress.allowed_vlans_grp = vlan_grp;
+ vport->egress.allow_untagged_grp = untagged_grp;
+
out:
+ if (err) {
+ if (!IS_ERR_OR_NULL(vlan_grp))
+ mlx5_destroy_flow_group(vlan_grp);
+ if (!IS_ERR_OR_NULL(untagged_grp))
+ mlx5_destroy_flow_group(untagged_grp);
+ if (!IS_ERR_OR_NULL(acl))
+ mlx5_destroy_flow_table(acl);
+ }
kvfree(flow_group_in);
- if (err && !IS_ERR_OR_NULL(vlan_grp))
- mlx5_destroy_flow_group(vlan_grp);
- if (err && !IS_ERR_OR_NULL(acl))
- mlx5_destroy_flow_table(acl);
return err;
}
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
- mlx5_del_flow_rules(vport->egress.allowed_vlan);
- vport->egress.allowed_vlan = NULL;
+ struct mlx5_acl_vlan *trunk_vlan_rule, *tmp;
+
+ if (!IS_ERR_OR_NULL(vport->egress.allowed_vst_vlan)) {
+ mlx5_del_flow_rules(vport->egress.allowed_vst_vlan);
+ vport->egress.allowed_vst_vlan = NULL;
+ }
+
+ list_for_each_entry_safe(trunk_vlan_rule, tmp,
+ &vport->egress.legacy.allowed_vlans_rules, list) {
+ mlx5_del_flow_rules(trunk_vlan_rule->acl_vlan_rule);
+ list_del(&trunk_vlan_rule->list);
+ kfree(trunk_vlan_rule);
}
if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) {
mlx5_del_flow_rules(vport->egress.legacy.drop_rule);
vport->egress.legacy.drop_rule = NULL;
}
+
+ if (!IS_ERR_OR_NULL(vport->egress.legacy.allow_untagged_rule)) {
+ mlx5_del_flow_rules(vport->egress.legacy.allow_untagged_rule);
+ vport->egress.legacy.allow_untagged_rule = NULL;
+ }
}
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
@@ -1053,9 +1185,11 @@ void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
esw_vport_cleanup_egress_rules(esw, vport);
+ mlx5_destroy_flow_group(vport->egress.allow_untagged_grp);
mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
mlx5_destroy_flow_group(vport->egress.drop_grp);
mlx5_destroy_flow_table(vport->egress.acl);
+ vport->egress.allow_untagged_grp = NULL;
vport->egress.allowed_vlans_grp = NULL;
vport->egress.drop_grp = NULL;
vport->egress.acl = NULL;
@@ -1065,12 +1199,21 @@ void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ bool need_vlan_filter = !!bitmap_weight(vport->info.vlan_trunk_8021q_bitmap,
+ VLAN_N_VID);
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *untagged_spoof_grp = NULL;
+ struct mlx5_flow_table *acl = vport->ingress.acl;
+ struct mlx5_flow_group *tagged_spoof_grp = NULL;
+ struct mlx5_flow_group *drop_grp = NULL;
struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_group *g;
void *match_criteria;
u32 *flow_group_in;
- int err;
+ int allow_grp_sz = 0;
+ int err = 0;
+
+ if (!acl)
+ return -EINVAL;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
@@ -1079,83 +1222,68 @@ void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+
+ if (vport->info.vlan || vport->info.qos || need_vlan_filter)
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+
+ if (vport->info.spoofchk) {
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ }
+
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
+ untagged_spoof_grp = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(untagged_spoof_grp)) {
+ err = PTR_ERR(untagged_spoof_grp);
esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
vport->vport, err);
- goto spoof_err;
+ goto out;
}
- vport->ingress.legacy.allow_untagged_spoofchk_grp = g;
+ allow_grp_sz += 1;
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+ if (!need_vlan_filter)
+ goto drop_grp;
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, VLAN_N_VID);
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
+ tagged_spoof_grp = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(tagged_spoof_grp)) {
+ err = PTR_ERR(tagged_spoof_grp);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress tagged spoofchk flow group, err(%d)\n",
vport->vport, err);
- goto untagged_err;
+ goto out;
}
- vport->ingress.legacy.allow_untagged_only_grp = g;
+ allow_grp_sz += VLAN_N_VID;
+drop_grp:
memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, allow_grp_sz);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, allow_grp_sz);
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
+ drop_grp = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(drop_grp)) {
+ err = PTR_ERR(drop_grp);
+ esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
vport->vport, err);
- goto allow_spoof_err;
+ goto out;
}
- vport->ingress.legacy.allow_spoofchk_only_grp = g;
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = untagged_spoof_grp;
+ vport->ingress.legacy.allow_tagged_spoofchk_grp = tagged_spoof_grp;
+ vport->ingress.legacy.drop_grp = drop_grp;
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
- vport->vport, err);
- goto drop_err;
+out:
+ if (err) {
+ if (!IS_ERR_OR_NULL(tagged_spoof_grp))
+ mlx5_destroy_flow_group(tagged_spoof_grp);
+ if (!IS_ERR_OR_NULL(untagged_spoof_grp))
+ mlx5_destroy_flow_group(untagged_spoof_grp);
}
- vport->ingress.legacy.drop_grp = g;
- kvfree(flow_group_in);
- return 0;
-drop_err:
- if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
- vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
- }
-allow_spoof_err:
- if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
- vport->ingress.legacy.allow_untagged_only_grp = NULL;
- }
-untagged_err:
- if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
- vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
- }
-spoof_err:
kvfree(flow_group_in);
return err;
}
@@ -1207,14 +1335,23 @@ void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport)
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_acl_vlan *trunk_vlan_rule, *tmp;
+
if (vport->ingress.legacy.drop_rule) {
mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
vport->ingress.legacy.drop_rule = NULL;
}
- if (vport->ingress.allow_rule) {
- mlx5_del_flow_rules(vport->ingress.allow_rule);
- vport->ingress.allow_rule = NULL;
+ list_for_each_entry_safe(trunk_vlan_rule, tmp,
+ &vport->ingress.legacy.allowed_vlans_rules, list) {
+ mlx5_del_flow_rules(trunk_vlan_rule->acl_vlan_rule);
+ list_del(&trunk_vlan_rule->list);
+ kfree(trunk_vlan_rule);
+ }
+
+ if (vport->ingress.allow_untagged_rule) {
+ mlx5_del_flow_rules(vport->ingress.allow_untagged_rule);
+ vport->ingress.allow_untagged_rule = NULL;
}
}
@@ -1227,18 +1364,14 @@ static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
esw_vport_cleanup_ingress_rules(esw, vport);
- if (vport->ingress.legacy.allow_spoofchk_only_grp) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
- vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
- }
- if (vport->ingress.legacy.allow_untagged_only_grp) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
- vport->ingress.legacy.allow_untagged_only_grp = NULL;
- }
if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
}
+ if (vport->ingress.legacy.allow_tagged_spoofchk_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_tagged_spoofchk_grp);
+ vport->ingress.legacy.allow_tagged_spoofchk_grp = NULL;
+ }
if (vport->ingress.legacy.drop_grp) {
mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
vport->ingress.legacy.drop_grp = NULL;
@@ -1249,33 +1382,47 @@ static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ bool need_vlan_filter = !!bitmap_weight(vport->info.vlan_trunk_8021q_bitmap,
+ VLAN_N_VID);
struct mlx5_fc *counter = vport->ingress.legacy.drop_counter;
struct mlx5_flow_destination drop_ctr_dst = {0};
struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_acl_vlan *trunk_vlan_rule;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
+ bool need_acl_table;
int dest_num = 0;
+ u16 vlan_id = 0;
int err = 0;
u8 *smac_v;
- /* The ingress acl table contains 4 groups
+ /* The ingress acl table contains 3 groups
* (2 active rules at the same time -
- * 1 allow rule from one of the first 3 groups.
- * 1 drop rule from the last group):
- * 1)Allow untagged traffic with smac=original mac.
- * 2)Allow untagged traffic.
- * 3)Allow traffic with smac=original mac.
- * 4)Drop all other traffic.
+ * 1 allow rule from one of the first 2 groups.
+ * 1 drop rule from the last group):
+ * 1)Allow untagged traffic with/without smac=original mac.
+ * 2)Allow tagged (VLAN trunk list) traffic with/without smac=original mac.
+ * 3)Drop all other traffic.
*/
- int table_size = 4;
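+ /* VGT+ needs 1 untagged + VLAN_N_VID tagged + 1 drop entries; 8192 rounds this up to a power-of-two table size */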
+ int table_size = need_vlan_filter ? 8192 : 4;
esw_vport_cleanup_ingress_rules(esw, vport);
- if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
+ need_acl_table = vport->info.vlan || vport->info.qos ||
+ vport->info.spoofchk || need_vlan_filter;
+
+ if (!need_acl_table) {
esw_vport_disable_legacy_ingress_acl(esw, vport);
return 0;
}
+ if ((vport->info.vlan || vport->info.qos) && need_vlan_filter) {
+ mlx5_core_warn(esw->dev,
+ "vport[%d] configure ingress rules failed, Cannot enable both VGT+ and VST\n",
+ vport->vport);
+ return -EPERM;
+ }
+
if (!vport->ingress.acl) {
err = esw_vport_create_ingress_acl_table(esw, vport, table_size);
if (err) {
@@ -1300,7 +1447,10 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
goto out;
}
- if (vport->info.vlan || vport->info.qos)
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ if (vport->info.vlan || vport->info.qos || need_vlan_filter)
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
if (vport->info.spoofchk) {
@@ -1312,20 +1462,52 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
ether_addr_copy(smac_v, vport->info.mac);
}
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- vport->ingress.allow_rule =
- mlx5_add_flow_rules(vport->ingress.acl, spec,
- &flow_act, NULL, 0);
- if (IS_ERR(vport->ingress.allow_rule)) {
- err = PTR_ERR(vport->ingress.allow_rule);
- esw_warn(esw->dev,
- "vport[%d] configure ingress allow rule, err(%d)\n",
- vport->vport, err);
- vport->ingress.allow_rule = NULL;
- goto out;
+ /* Allow untagged */
+ if (!need_vlan_filter || test_bit(0, vport->info.vlan_trunk_8021q_bitmap)) {
+ vport->ingress.allow_untagged_rule =
+ mlx5_add_flow_rules(vport->ingress.acl, spec,
+ &flow_act, NULL, 0);
+ if (IS_ERR(vport->ingress.allow_untagged_rule)) {
+ err = PTR_ERR(vport->ingress.allow_untagged_rule);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress allow rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.allow_untagged_rule = NULL;
+ goto out;
+ }
}
+ if (!need_vlan_filter)
+ goto drop_rule;
+
+ /* Allow tagged (VLAN trunk list) */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+
+ for_each_set_bit(vlan_id, vport->acl_vlan_8021q_bitmap, VLAN_N_VID) {
+ trunk_vlan_rule = kzalloc(sizeof(*trunk_vlan_rule), GFP_KERNEL);
+ if (!trunk_vlan_rule) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
+ vlan_id);
+ trunk_vlan_rule->acl_vlan_rule =
+ mlx5_add_flow_rules(vport->ingress.acl, spec, &flow_act, NULL, 0);
+ if (IS_ERR(trunk_vlan_rule->acl_vlan_rule)) {
+ err = PTR_ERR(trunk_vlan_rule->acl_vlan_rule);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress allowed vlan rule failed, err(%d)\n",
+ vport->vport, err);
+ kfree(trunk_vlan_rule);
+ continue;
+ }
+ list_add(&trunk_vlan_rule->list, &vport->ingress.legacy.allowed_vlans_rules);
+ }
+
+drop_rule:
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
@@ -1348,11 +1530,11 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
vport->ingress.legacy.drop_rule = NULL;
goto out;
}
- kvfree(spec);
- return 0;
out:
- esw_vport_disable_legacy_ingress_acl(esw, vport);
+ if (err)
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
+
kvfree(spec);
return err;
}
@@ -1365,7 +1547,7 @@ int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec;
int err = 0;
- if (vport->egress.allowed_vlan)
+ if (vport->egress.allowed_vst_vlan)
return -EEXIST;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
@@ -1379,15 +1561,15 @@ int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action = flow_action;
- vport->egress.allowed_vlan =
+ vport->egress.allowed_vst_vlan =
mlx5_add_flow_rules(vport->egress.acl, spec,
&flow_act, NULL, 0);
- if (IS_ERR(vport->egress.allowed_vlan)) {
- err = PTR_ERR(vport->egress.allowed_vlan);
+ if (IS_ERR(vport->egress.allowed_vst_vlan)) {
+ err = PTR_ERR(vport->egress.allowed_vst_vlan);
esw_warn(esw->dev,
"vport[%d] configure egress vlan rule failed, err(%d)\n",
vport->vport, err);
- vport->egress.allowed_vlan = NULL;
+ vport->egress.allowed_vst_vlan = NULL;
}
kvfree(spec);
@@ -1397,17 +1579,22 @@ int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ bool need_vlan_filter = !!bitmap_weight(vport->info.vlan_trunk_8021q_bitmap,
+ VLAN_N_VID);
+ bool need_acl_table = vport->info.vlan || vport->info.qos || need_vlan_filter;
+ struct mlx5_acl_vlan *trunk_vlan_rule;
struct mlx5_fc *counter = vport->egress.legacy.drop_counter;
struct mlx5_flow_destination drop_ctr_dst = {0};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
int dest_num = 0;
+ u16 vlan_id = 0;
int err = 0;
esw_vport_cleanup_egress_rules(esw, vport);
- if (!vport->info.vlan && !vport->info.qos) {
+ if (!need_acl_table) {
esw_vport_disable_egress_acl(esw, vport);
return 0;
}
@@ -1424,17 +1611,67 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
- /* Allowed vlan rule */
- err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan,
- MLX5_FLOW_CONTEXT_ACTION_ALLOW);
- if (err)
- return err;
-
- /* Drop others rule (star rule) */
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
+ if (!spec) {
+ err = -ENOMEM;
goto out;
+ }
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ /* Allow untagged */
+ if (need_vlan_filter && test_bit(0, vport->info.vlan_trunk_8021q_bitmap)) {
+ vport->egress.legacy.allow_untagged_rule =
+ mlx5_add_flow_rules(vport->egress.acl, spec,
+ &flow_act, NULL, 0);
+ if (IS_ERR(vport->egress.legacy.allow_untagged_rule)) {
+ err = PTR_ERR(vport->egress.legacy.allow_untagged_rule);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress allow rule, err(%d)\n",
+ vport->vport, err);
+ vport->egress.legacy.allow_untagged_rule = NULL;
+ }
+ }
+
+ /* VST rule */
+ if (vport->info.vlan || vport->info.qos) {
+ err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+ if (err)
+ goto out;
+ }
+
+ /* Allowed trunk vlans rules (VGT+) */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+ for_each_set_bit(vlan_id, vport->acl_vlan_8021q_bitmap, VLAN_N_VID) {
+ trunk_vlan_rule = kzalloc(sizeof(*trunk_vlan_rule), GFP_KERNEL);
+ if (!trunk_vlan_rule) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
+ vlan_id);
+ trunk_vlan_rule->acl_vlan_rule =
+ mlx5_add_flow_rules(vport->egress.acl, spec, &flow_act, NULL, 0);
+ if (IS_ERR(trunk_vlan_rule->acl_vlan_rule)) {
+ err = PTR_ERR(trunk_vlan_rule->acl_vlan_rule);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
+ vport->vport, err);
+ kfree(trunk_vlan_rule);
+ continue;
+ }
+ list_add(&trunk_vlan_rule->list, &vport->egress.legacy.allowed_vlans_rules);
+ }
+
+ /* Drop others rule (star rule) */
+
+ memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach egress drop flow counter */
@@ -1455,7 +1692,11 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
vport->vport, err);
vport->egress.legacy.drop_rule = NULL;
}
+
out:
+ if (err)
+ esw_vport_cleanup_egress_rules(esw, vport);
+
kvfree(spec);
return err;
}
@@ -1787,6 +2028,12 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
+ bitmap_zero(vport->req_vlan_bitmap, VLAN_N_VID);
+ bitmap_zero(vport->acl_vlan_8021q_bitmap, VLAN_N_VID);
+ bitmap_zero(vport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID);
+ INIT_LIST_HEAD(&vport->egress.legacy.allowed_vlans_rules);
+ INIT_LIST_HEAD(&vport->ingress.legacy.allowed_vlans_rules);
+
/* Restore old vport configuration */
esw_apply_vport_conf(esw, vport);
@@ -2268,6 +2515,17 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->trusted = evport->info.trusted;
ivi->min_tx_rate = evport->info.min_rate;
ivi->max_tx_rate = evport->info.max_rate;
+
+ if (bitmap_weight(evport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID)) {
+ bitmap_copy((unsigned long *)ivi->trunk_8021q,
+ evport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID);
+ ivi->vlan_mode = IFLA_VF_VLAN_MODE_TRUNK;
+ } else if (ivi->vlan) {
+ ivi->vlan_mode = IFLA_VF_VLAN_MODE_VST;
+ } else {
+ ivi->vlan_mode = IFLA_VF_VLAN_MODE_VGT;
+ }
+
mutex_unlock(&esw->state_lock);
return 0;
@@ -2286,6 +2544,14 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
if (vlan > 4095 || qos > 7)
return -EINVAL;
+ if (bitmap_weight(evport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID)) {
+ mlx5_core_warn(esw->dev,
+ "VST is not allowed when operating in VGT+ mode vport(%d)\n",
+ vport);
+ return -EPERM;
+ }
+
err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
if (err)
return err;
@@ -2628,6 +2894,92 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
return 0;
}
+static int mlx5_eswitch_update_vport_trunk(struct mlx5_eswitch *esw,
+ struct mlx5_vport *evport,
+ unsigned long *old_trunk)
+{
+ DECLARE_BITMAP(diff_vlan_bm, VLAN_N_VID);
+ int err = 0;
+
+ bitmap_xor(diff_vlan_bm, old_trunk,
+ evport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID);
+ if (!bitmap_weight(diff_vlan_bm, VLAN_N_VID))
+ return 0;
+
+ esw_update_acl_trunk_bitmap(esw, evport->vport);
+ if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
+ err = esw_vport_egress_config(esw, evport);
+ if (!err)
+ err = esw_vport_ingress_config(esw, evport);
+ }
+
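+ /* On failure, restore the previous trunk bitmap and reapply the old ACL configuration */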
+ if (err) {
+ bitmap_copy(evport->info.vlan_trunk_8021q_bitmap, old_trunk, VLAN_N_VID);
+ esw_update_acl_trunk_bitmap(esw, evport->vport);
+ esw_vport_egress_config(esw, evport);
+ esw_vport_ingress_config(esw, evport);
+ }
+
+ return err;
+}
+
+int mlx5_eswitch_add_vport_trunk_range(struct mlx5_eswitch *esw,
+ u16 vport, u16 start_vlan, u16 end_vlan)
+{
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ DECLARE_BITMAP(prev_vport_bitmap, VLAN_N_VID);
+ int err = 0;
+
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+
+ if (IS_ERR(evport) || end_vlan >= VLAN_N_VID || start_vlan > end_vlan)
+ return -EINVAL;
+
+ mutex_lock(&esw->state_lock);
+
+ if (evport->info.vlan || evport->info.qos) {
+ err = -EPERM;
+ mlx5_core_warn(esw->dev,
+ "VGT+ is not allowed when operating in VST mode vport(%d)\n",
+ vport);
+ goto unlock;
+ }
+
+ bitmap_copy(prev_vport_bitmap, evport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID);
+ bitmap_set(evport->info.vlan_trunk_8021q_bitmap, start_vlan,
+ end_vlan - start_vlan + 1);
+ err = mlx5_eswitch_update_vport_trunk(esw, evport, prev_vport_bitmap);
+
+unlock:
+ mutex_unlock(&esw->state_lock);
+
+ return err;
+}
+
+int mlx5_eswitch_del_vport_trunk_range(struct mlx5_eswitch *esw,
+ u16 vport, u16 start_vlan, u16 end_vlan)
+{
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ DECLARE_BITMAP(prev_vport_bitmap, VLAN_N_VID);
+ int err = 0;
+
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+
+ if (IS_ERR(evport) || end_vlan >= VLAN_N_VID || start_vlan > end_vlan)
+ return -EINVAL;
+
+ mutex_lock(&esw->state_lock);
+ bitmap_copy(prev_vport_bitmap, evport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID);
+ bitmap_clear(evport->info.vlan_trunk_8021q_bitmap, start_vlan,
+ end_vlan - start_vlan + 1);
+ err = mlx5_eswitch_update_vport_trunk(esw, evport, prev_vport_bitmap);
+ mutex_unlock(&esw->state_lock);
+
+ return err;
+}
+
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
u16 vport_num,
struct ifla_vf_stats *vf_stats)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 920d8f5..1ba7aa3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -35,6 +35,8 @@
#include <linux/if_ether.h>
#include <linux/if_link.h>
+#include <linux/if_vlan.h>
+#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
@@ -51,6 +53,9 @@
#define MLX5_MAX_MC_PER_VPORT(dev) \
(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))
+#define MLX5_MAX_VLAN_PER_VPORT(dev) \
+ (1 << MLX5_CAP_GEN(dev, log_max_vlan_list))
+
#define MLX5_MIN_BW_SHARE 1
#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
@@ -65,14 +70,14 @@
struct vport_ingress {
struct mlx5_flow_table *acl;
- struct mlx5_flow_handle *allow_rule;
+ struct mlx5_flow_handle *allow_untagged_rule;
struct {
- struct mlx5_flow_group *allow_spoofchk_only_grp;
struct mlx5_flow_group *allow_untagged_spoofchk_grp;
- struct mlx5_flow_group *allow_untagged_only_grp;
+ struct mlx5_flow_group *allow_tagged_spoofchk_grp;
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *drop_rule;
struct mlx5_fc *drop_counter;
+ struct list_head allowed_vlans_rules;
} legacy;
struct {
struct mlx5_flow_group *metadata_grp;
@@ -83,11 +88,14 @@ struct vport_ingress {
struct vport_egress {
struct mlx5_flow_table *acl;
+ struct mlx5_flow_group *allow_untagged_grp;
struct mlx5_flow_group *allowed_vlans_grp;
struct mlx5_flow_group *drop_grp;
- struct mlx5_flow_handle *allowed_vlan;
+ struct mlx5_flow_handle *allowed_vst_vlan;
struct {
struct mlx5_flow_handle *drop_rule;
+ struct list_head allowed_vlans_rules;
+ struct mlx5_flow_handle *allow_untagged_rule;
struct mlx5_fc *drop_counter;
} legacy;
};
@@ -107,12 +115,15 @@ struct mlx5_vport_info {
u32 max_rate;
bool spoofchk;
bool trusted;
+ /* the admin approved vlan list */
+ DECLARE_BITMAP(vlan_trunk_8021q_bitmap, VLAN_N_VID);
};
/* Vport context events */
enum mlx5_eswitch_vport_event {
MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
+ MLX5_VPORT_VLAN_CHANGE = BIT(2),
MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};
@@ -121,6 +132,10 @@ struct mlx5_vport {
int vport;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
+ /* The requested vlan list from the vport side */
+ DECLARE_BITMAP(req_vlan_bitmap, VLAN_N_VID);
+ /* Actual accepted vlans on the acl tables */
+ DECLARE_BITMAP(acl_vlan_8021q_bitmap, VLAN_N_VID);
struct mlx5_flow_handle *promisc_rule;
struct mlx5_flow_handle *allmulti_rule;
struct work_struct vport_change_handler;
@@ -292,6 +307,10 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
u16 vport, struct ifla_vf_info *ivi);
+int mlx5_eswitch_add_vport_trunk_range(struct mlx5_eswitch *esw,
+ u16 vport, u16 start_vlan, u16 end_vlan);
+int mlx5_eswitch_del_vport_trunk_range(struct mlx5_eswitch *esw,
+ u16 vport, u16 start_vlan, u16 end_vlan);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
u16 vport,
struct ifla_vf_stats *vf_stats);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9924f06..2db872a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1783,15 +1783,15 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
}
- vport->ingress.allow_rule =
+ vport->ingress.allow_untagged_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
&flow_act, NULL, 0);
- if (IS_ERR(vport->ingress.allow_rule)) {
- err = PTR_ERR(vport->ingress.allow_rule);
+ if (IS_ERR(vport->ingress.allow_untagged_rule)) {
+ err = PTR_ERR(vport->ingress.allow_untagged_rule);
esw_warn(esw->dev,
"vport[%d] configure ingress untagged allow rule, err(%d)\n",
vport->vport, err);
- vport->ingress.allow_rule = NULL;
+ vport->ingress.allow_untagged_rule = NULL;
goto out;
}
--
1.8.3.1