Message-Id: <20200228004446.159497-9-saeedm@mellanox.com>
Date: Thu, 27 Feb 2020 16:44:38 -0800
From: Saeed Mahameed <saeedm@...lanox.com>
To: "David S. Miller" <davem@...emloft.net>, kuba@...nel.org
Cc: netdev@...r.kernel.org, Jianbo Liu <jianbol@...lanox.com>,
Vlad Buslov <vladbu@...lanox.com>,
Roi Dayan <roid@...lanox.com>, Jiri Pirko <jiri@...lanox.com>,
Saeed Mahameed <saeedm@...lanox.com>
Subject: [net-next 08/16] net/mlx5e: Add devlink fdb_large_groups parameter

From: Jianbo Liu <jianbol@...lanox.com>

Add a devlink parameter to control the number of large groups in an
autogrouped flow table. The default value is 15, and the range is between 1
and 1024.

The size of each large group can be calculated according to the following
formula: size = 4M / (fdb_large_groups + 1).
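
For example, with the default of 15 large groups, each large group can hold
up to 4M / (15 + 1) = 256K flow entries; setting fdb_large_groups to 63
would leave 4M / 64 = 64K entries per group.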

Examples:
- Set the number of large groups to 20.
    $ devlink dev param set pci/0000:82:00.0 name fdb_large_groups \
          cmode driverinit value 20

  Then run the devlink reload command to apply the new value.
    $ devlink dev reload pci/0000:82:00.0

- Read the number of large groups in the flow table.
    $ devlink dev param show pci/0000:82:00.0 name fdb_large_groups
      pci/0000:82:00.0:
        name fdb_large_groups type driver-specific
          values:
             cmode driverinit value 20

Signed-off-by: Jianbo Liu <jianbol@...lanox.com>
Reviewed-by: Vlad Buslov <vladbu@...lanox.com>
Reviewed-by: Roi Dayan <roid@...lanox.com>
Acked-by: Jiri Pirko <jiri@...lanox.com>
Signed-off-by: Saeed Mahameed <saeedm@...lanox.com>
---
Documentation/networking/devlink/mlx5.rst | 6 ++++
.../net/ethernet/mellanox/mlx5/core/devlink.c | 36 ++++++++++++++++---
.../net/ethernet/mellanox/mlx5/core/devlink.h | 6 ++++
.../net/ethernet/mellanox/mlx5/core/eswitch.c | 22 ++++++++++++
.../net/ethernet/mellanox/mlx5/core/eswitch.h | 6 +++-
.../mellanox/mlx5/core/eswitch_offloads.c | 4 ++-
.../mlx5/core/eswitch_offloads_chains.c | 4 +--
7 files changed, 75 insertions(+), 9 deletions(-)
diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst
index 629a6e69c036..4e4b97f7971a 100644
--- a/Documentation/networking/devlink/mlx5.rst
+++ b/Documentation/networking/devlink/mlx5.rst
@@ -37,6 +37,12 @@ parameters.
* ``smfs`` Software managed flow steering. In SMFS mode, the HW
steering entities are created and managed through the driver without
firmware intervention.
+ * - ``fdb_large_groups``
+ - u32
+ - driverinit
+ - Control the number of large groups (size > 1) in the FDB table.
+
+ * The default value is 15, and the range is between 1 and 1024.
The ``mlx5`` driver supports reloading via ``DEVLINK_CMD_RELOAD``
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index ca7f08513174..b7bb81b8c49b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -190,11 +190,6 @@ static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id,
return 0;
}
-enum mlx5_devlink_param_id {
- MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
-};
-
static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -210,6 +205,23 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
return 0;
}
+#ifdef CONFIG_MLX5_ESWITCH
+static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ int group_num = val.vu32;
+
+ if (group_num < 1 || group_num > 1024) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported group number, supported range is 1-1024");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+#endif
+
static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
"flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
@@ -218,6 +230,13 @@ static const struct devlink_param mlx5_devlink_params[] = {
mlx5_devlink_fs_mode_validate),
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_enable_roce_validate),
+#ifdef CONFIG_MLX5_ESWITCH
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ "fdb_large_groups", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL,
+ mlx5_devlink_large_group_num_validate),
+#endif
};
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -237,6 +256,13 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
value);
+
+#ifdef CONFIG_MLX5_ESWITCH
+ value.vu32 = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
+ devlink_param_driverinit_value_set(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ value);
+#endif
}
int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index d0ba03774ddf..f0de327a59be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -6,6 +6,12 @@
#include <net/devlink.h>
+enum mlx5_devlink_param_id {
+ MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+};
+
struct devlink *mlx5_devlink_alloc(void);
void mlx5_devlink_free(struct devlink *devlink);
int mlx5_devlink_register(struct devlink *devlink, struct device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index e49acd0c5da5..25640864c375 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -39,6 +39,7 @@
#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
+#include "devlink.h"
#include "ecpf.h"
enum {
@@ -2006,6 +2007,25 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
esw_disable_vport(esw, vport);
}
+static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
+{
+ struct devlink *devlink = priv_to_devlink(esw->dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ &val);
+ if (!err) {
+ esw->params.large_group_num = val.vu32;
+ } else {
+ esw_warn(esw->dev,
+ "Devlink can't get param fdb_large_groups, uses default (%d).\n",
+ ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
+ esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
+ }
+}
+
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
{
int err;
@@ -2022,6 +2042,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "engress ACL is not supported by FW\n");
+ mlx5_eswitch_get_devlink_param(esw);
+
esw_create_tsar(esw);
esw->mode = mode;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 479d2458f872..d010657ce601 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -49,13 +49,14 @@
/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
-#define ESW_OFFLOADS_NUM_GROUPS 4
#define FDB_TC_MAX_PRIO 16
#define FDB_TC_LEVELS_PER_PRIO 2
#ifdef CONFIG_MLX5_ESWITCH
+#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15
+
#define MLX5_MAX_UC_PER_VPORT(dev) \
(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))
@@ -262,6 +263,9 @@ struct mlx5_eswitch {
u16 manager_vport;
u16 first_host_vport;
struct mlx5_esw_functions esw_funcs;
+ struct {
+ u32 large_group_num;
+ } params;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9a72c719d8f5..4b5b6618dff4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -71,13 +71,15 @@ struct mlx5_vport_table {
struct mlx5_vport_key key;
};
+#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
+
static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *fdb;
- ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
+ ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
ft_attr.prio = FDB_PER_VPORT;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
index 60121f2ee6c5..d41e4f002b84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
@@ -237,7 +237,7 @@ mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
}
ft_attr.autogroup.num_reserved_entries = 2;
- ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
+ ft_attr.autogroup.max_num_groups = esw->params.large_group_num;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(fdb)) {
esw_warn(esw->dev,
@@ -640,7 +640,7 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw)
esw_debug(dev,
"Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
- max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, fdb_max);
+ max_flow_counter, esw->params.large_group_num, fdb_max);
mlx5_esw_chains_init_sz_pool(esw);
--
2.24.1