Message-ID: <20190822233514.31252-4-saeedm@mellanox.com>
Date: Thu, 22 Aug 2019 23:35:50 +0000
From: Saeed Mahameed <saeedm@...lanox.com>
To: "David S. Miller" <davem@...emloft.net>
CC: "netdev@...r.kernel.org" <netdev@...r.kernel.org>,
Maxim Mikityanskiy <maximmi@...lanox.com>,
Tariq Toukan <tariqt@...lanox.com>,
Mark Bloch <markb@...lanox.com>,
Saeed Mahameed <saeedm@...lanox.com>
Subject: [net-next 3/8] net/mlx5e: Support LAG TX port affinity distribution
From: Maxim Mikityanskiy <maximmi@...lanox.com>
When the VF LAG is in use, round-robin the TX affinity of channels among
the different ports, if supported by the firmware. Create a set of TISes
per port, and round-robin the channels over these sets. Let all SQs of a
channel share the same set of TISes.

If the lag_tx_port_affinity HCA capability bit is set, num_lag_ports > 1,
and we aren't the LACP owner (the PF in the regular use case), assign the
affinities explicitly; otherwise leave tx_affinity == 0 in the TIS context
to let the FW assign the affinities itself. The TISes of the LACP owner
are mapped only to the native physical port.

For VFs, the starting port for the round-robin is determined by the VF's
vhca_id, because a VF may have only one channel if attached to a
single-core VM; without a per-VF bias, all such VFs would transmit on the
same port.
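
For illustration, a minimal standalone sketch of the mapping done by
mlx5e_enumerate_lag_port(); the 2-port LAG and the vhca_id value below
are assumed, not taken from real hardware:

/* Mirrors mlx5e_enumerate_lag_port(): channels round-robin over the LAG
 * ports, and a VF starts from an offset derived from its vhca_id, so
 * single-channel VFs do not all land on the same port.
 */
#include <stdio.h>

int main(void)
{
	const int num_lag_ports = 2;	/* assumed 2-port LAG */
	const int vhca_id = 5;		/* assumed VF vhca_id; a PF uses bias 0 */
	int ix;

	for (ix = 0; ix < 4; ix++)
		printf("channel %d -> lag port %d\n",
		       ix, (ix + vhca_id) % num_lag_ports);
	return 0;
}
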
Signed-off-by: Maxim Mikityanskiy <maximmi@...lanox.com>
Signed-off-by: Tariq Toukan <tariqt@...lanox.com>
Signed-off-by: Mark Bloch <markb@...lanox.com>
Reviewed-by: Tariq Toukan <tariqt@...lanox.com>
Signed-off-by: Saeed Mahameed <saeedm@...lanox.com>
---
drivers/net/ethernet/mellanox/mlx5/core/en.h | 11 +++-
.../net/ethernet/mellanox/mlx5/core/en_main.c | 54 +++++++++++++------
.../ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 4 +-
.../mellanox/mlx5/core/ipoib/ipoib_vlan.c | 6 +--
4 files changed, 53 insertions(+), 22 deletions(-)
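
A note on the error unwind in mlx5e_create_tises() below: it is two-level,
freeing the partially created TIS set of the failing port first and then
the full sets of all earlier ports. A minimal standalone sketch of the same
loop shape (table sizes and failure point are assumed; max_tc stands in for
priv->profile->max_tc) prints the resulting destroy order:

/* Simulates the create/unwind loops of mlx5e_create_tises(): a failure at
 * (i = 1, tc = 2) destroys tisn[1][1], tisn[1][0], then tisn[0][3..0].
 */
#include <stdio.h>

int main(void)
{
	const int num_ports = 2, max_tc = 4;	/* assumed sizes */
	const int fail_i = 1, fail_tc = 2;	/* assumed failure point */
	int i, tc;

	for (i = 0; i < num_ports; i++)
		for (tc = 0; tc < max_tc; tc++)
			if (i == fail_i && tc == fail_tc)
				goto err;
	return 0;

err:
	for (; i >= 0; i--) {
		for (tc--; tc >= 0; tc--)
			printf("destroy tisn[%d][%d]\n", i, tc);
		tc = max_tc;
	}
	return 1;
}
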
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 491c281416d0..e03f973c962f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -163,6 +163,14 @@ enum mlx5e_rq_group {
#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
};
+static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
+{
+ if (mlx5_lag_is_lacp_owner(mdev))
+ return 1;
+
+ return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
+}
+
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
switch (wq_type) {
@@ -705,6 +713,7 @@ struct mlx5e_channel {
struct net_device *netdev;
__be32 mkey_be;
u8 num_tc;
+ u8 lag_port;
/* XDP_REDIRECT */
struct mlx5e_xdpsq xdpsq;
@@ -818,7 +827,7 @@ struct mlx5e_priv {
struct mlx5e_rq drop_rq;
struct mlx5e_channels channels;
- u32 tisn[MLX5E_MAX_NUM_TC];
+ u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
struct mlx5e_rqt indir_rqt;
struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d0cda5181ab4..b1bc0e601cc2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1442,7 +1442,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
return err;
csp.tis_lst_sz = 1;
- csp.tisn = c->priv->tisn[0]; /* tc = 0 */
+ csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
@@ -1692,7 +1692,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
for (tc = 0; tc < params->num_tc; tc++) {
int txq_ix = c->ix + tc * priv->max_nch;
- err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
+ err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
params, &cparam->sq, &c->sq[tc], tc);
if (err)
goto err_close_sqs;
@@ -1926,6 +1926,13 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
mlx5e_close_cq(&c->icosq.cq);
}
+static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
+{
+ u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);
+
+ return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
@@ -1960,6 +1967,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
c->irq_desc = irq_to_desc(irq);
+ c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
err = mlx5e_alloc_xps_cpumask(c, params);
if (err)
@@ -3181,35 +3189,49 @@ void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
void mlx5e_destroy_tises(struct mlx5e_priv *priv)
{
- int tc;
+ int tc, i;
- for (tc = 0; tc < priv->profile->max_tc; tc++)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
+ for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
+ for (tc = 0; tc < priv->profile->max_tc; tc++)
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
+}
+
+static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
}
int mlx5e_create_tises(struct mlx5e_priv *priv)
{
+ int tc, i;
int err;
- int tc;
- for (tc = 0; tc < priv->profile->max_tc; tc++) {
- u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
- void *tisc;
+ for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
+ for (tc = 0; tc < priv->profile->max_tc; tc++) {
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+ void *tisc;
- tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
- MLX5_SET(tisc, tisc, prio, tc << 1);
+ MLX5_SET(tisc, tisc, prio, tc << 1);
- err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[tc]);
- if (err)
- goto err_close_tises;
+ if (mlx5e_lag_should_assign_affinity(priv->mdev))
+ MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
+
+ err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
+ if (err)
+ goto err_close_tises;
+ }
}
return 0;
err_close_tises:
- for (tc--; tc >= 0; tc--)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
+ for (; i >= 0; i--) {
+ for (tc--; tc >= 0; tc--)
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
+ tc = priv->profile->max_tc;
+ }
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 1a2560e3bf7c..3ed8ab2d703d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -279,7 +279,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
return err;
}
- err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0]);
+ err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0][0]);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
goto err_destroy_underlay_qp;
@@ -296,7 +296,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
struct mlx5i_priv *ipriv = priv->ppriv;
- mlx5e_destroy_tis(priv->mdev, priv->tisn[0]);
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index c5a491e22e55..96e64187c089 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -210,7 +210,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
goto err_unint_underlay_qp;
}
- err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0]);
+ err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0][0]);
if (err) {
mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
goto err_remove_rx_uderlay_qp;
@@ -228,7 +228,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
return 0;
err_clear_state_opened_flag:
- mlx5e_destroy_tis(mdev, epriv->tisn[0]);
+ mlx5e_destroy_tis(mdev, epriv->tisn[0][0]);
err_remove_rx_uderlay_qp:
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
err_unint_underlay_qp:
@@ -257,7 +257,7 @@ static int mlx5i_pkey_close(struct net_device *netdev)
mlx5i_uninit_underlay_qp(priv);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
- mlx5e_destroy_tis(mdev, priv->tisn[0]);
+ mlx5e_destroy_tis(mdev, priv->tisn[0][0]);
unlock:
mutex_unlock(&priv->state_lock);
return 0;
--
2.21.0