Message-Id: <20230610014254.343576-7-saeed@kernel.org>
Date: Fri, 9 Jun 2023 18:42:45 -0700
From: Saeed Mahameed <saeed@...nel.org>
To: "David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Eric Dumazet <edumazet@...gle.com>
Cc: Saeed Mahameed <saeedm@...dia.com>,
netdev@...r.kernel.org,
Tariq Toukan <tariqt@...dia.com>,
Daniel Jurgens <danielj@...dia.com>,
William Tu <witu@...dia.com>,
Roi Dayan <roid@...dia.com>
Subject: [net-next 06/15] net/mlx5: Add/remove peer miss rules for EC VFs

From: Daniel Jurgens <danielj@...dia.com>

Add and remove the peer miss rules for EC VFs. It's possible that the two
functions support different numbers of total VFs, so only create rules for
the minimum of the two max VF counts.

Signed-off-by: Daniel Jurgens <danielj@...dia.com>
Reviewed-by: William Tu <witu@...dia.com>
Reviewed-by: Roi Dayan <roid@...dia.com>
Signed-off-by: Saeed Mahameed <saeedm@...dia.com>
---
.../mellanox/mlx5/core/eswitch_offloads.c | 32 +++++++++++++++++++
1 file changed, 32 insertions(+)
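
For reference, a minimal standalone sketch (not the driver code; the helper
and the counts local_max_ec_vfs/peer_max_ec_vfs are illustrative placeholders)
of the bounding rule described above: rules are only installed for EC VF
indices below min(local max EC VFs, peer max EC VFs), mirroring the
"if (i >= mlx5_core_max_ec_vfs(peer_dev)) break;" check in the diff.

#include <stdio.h>

/* Install peer miss rules only for EC VF indices the peer also supports. */
static void add_ec_vf_peer_miss_rules(unsigned int local_max_ec_vfs,
				      unsigned int peer_max_ec_vfs)
{
	unsigned int i;

	for (i = 0; i < local_max_ec_vfs; i++) {
		if (i >= peer_max_ec_vfs)
			break;	/* peer has fewer EC VFs; nothing left to match */
		printf("install peer miss rule for EC VF vport %u\n", i);
	}
}

int main(void)
{
	/* Local side supports 8 EC VFs, peer only 4: rules 0..3 are created. */
	add_ec_vf_peer_miss_rules(8, 4);
	return 0;
}
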
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 68798aed792f..fdf482f6fb34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1125,11 +1125,32 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
flows[vport->index] = flow;
}
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
+ if (i >= mlx5_core_max_ec_vfs(peer_dev))
+ break;
+ esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
+ spec, vport->vport);
+ flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ goto add_ec_vf_flow_err;
+ }
+ flows[vport->index] = flow;
+ }
+ }
esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)] = flows;
kvfree(spec);
return 0;
+add_ec_vf_flow_err:
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
+ if (!flows[vport->index])
+ continue;
+ mlx5_del_flow_rules(flows[vport->index]);
+ }
add_vf_flow_err:
mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
if (!flows[vport->index])
@@ -1162,6 +1183,17 @@ static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
flows = esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)];
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
+ /* The flow for a particular vport could be NULL if the other ECPF
+ * has fewer or no VFs enabled
+ */
+ if (!flows[vport->index])
+ continue;
+ mlx5_del_flow_rules(flows[vport->index]);
+ }
+ }
+
mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
mlx5_del_flow_rules(flows[vport->index]);
--
2.40.1