Message-ID: <uovifydwz7vbhbjzy4g4x4lkrq7htepoktekqidqxytkqi6ra6@2xfhgel6h7sz>
Date: Fri, 28 Feb 2025 13:46:05 +0100
From: Jiri Pirko <jiri@...nulli.us>
To: Saeed Mahameed <saeed@...nel.org>
Cc: "David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Eric Dumazet <edumazet@...gle.com>, Saeed Mahameed <saeedm@...dia.com>, netdev@...r.kernel.org,
Tariq Toukan <tariqt@...dia.com>, Gal Pressman <gal@...dia.com>,
Leon Romanovsky <leonro@...dia.com>, Jiri Pirko <jiri@...dia.com>,
Vlad Dumitrescu <vdumitrescu@...dia.com>
Subject: Re: [PATCH net-next 04/14] net/mlx5: Implement devlink enable_sriov
parameter

Fri, Feb 28, 2025 at 03:12:17AM +0100, saeed@...nel.org wrote:
>From: Vlad Dumitrescu <vdumitrescu@...dia.com>
>
>Example usage:
> devlink dev param set pci/0000:01:00.0 name enable_sriov value {true, false} cmode permanent
> devlink dev reload pci/0000:01:00.0 action fw_activate
> echo 1 >/sys/bus/pci/devices/0000:01:00.0/remove
> echo 1 >/sys/bus/pci/rescan
> grep ^ /sys/bus/pci/devices/0000:01:00.0/sriov_*
>
>Signed-off-by: Vlad Dumitrescu <vdumitrescu@...dia.com>
>Signed-off-by: Saeed Mahameed <saeedm@...dia.com>
>---
> Documentation/networking/devlink/mlx5.rst | 14 +-
> .../net/ethernet/mellanox/mlx5/core/devlink.c | 1 +
> .../mellanox/mlx5/core/lib/nv_param.c | 184 ++++++++++++++++++
> 3 files changed, 196 insertions(+), 3 deletions(-)
>
>diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst
>index 417e5cdcd35d..587e0200c1cd 100644
>--- a/Documentation/networking/devlink/mlx5.rst
>+++ b/Documentation/networking/devlink/mlx5.rst
>@@ -15,23 +15,31 @@ Parameters
> * - Name
> - Mode
> - Validation
>+ - Notes
> * - ``enable_roce``
> - driverinit
>- - Type: Boolean
>-
>- If the device supports RoCE disablement, RoCE enablement state controls
>+ - Boolean
>+ - If the device supports RoCE disablement, RoCE enablement state controls
> device support for RoCE capability. Otherwise, the control occurs in the
> driver stack. When RoCE is disabled at the driver level, only raw
> ethernet QPs are supported.
> * - ``io_eq_size``
> - driverinit
> - The range is between 64 and 4096.
>+ -
> * - ``event_eq_size``
> - driverinit
> - The range is between 64 and 4096.
>+ -
> * - ``max_macs``
> - driverinit
> - The range is between 1 and 2^31. Only power of 2 values are supported.
>+ -
>+ * - ``enable_sriov``
>+ - permanent
>+ - Boolean
>+ - Applies to each physical function (PF) independently, if the device
>+ supports it. Otherwise, it applies symmetrically to all PFs.
>
> The ``mlx5`` driver also implements the following driver-specific
> parameters.
>diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
>index 1f764ae4f4aa..7a702d84f19a 100644
>--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
>+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
>@@ -8,5 +8,6 @@
> #include "fs_core.h"
> #include "eswitch.h"
> #include "esw/qos.h"
>+#include "lib/nv_param.h"
> #include "sf/dev/dev.h"
> #include "sf/sf.h"
> #include "lib/nv_param.h"
>diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
>index 5ab37a88c260..6b63fc110e2d 100644
>--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
>+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
>@@ -5,7 +5,11 @@
> #include "mlx5_core.h"
>
> enum {
>+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF = 0x80,
>+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP = 0x81,
> MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG = 0x10a,
>+
>+ MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF = 0x80,
> };
>
> struct mlx5_ifc_configuration_item_type_class_global_bits {
>@@ -13,9 +17,18 @@ struct mlx5_ifc_configuration_item_type_class_global_bits {
> u8 parameter_index[0x18];
> };
>
>+struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits {
>+ u8 type_class[0x8];
>+ u8 pf_index[0x6];
>+ u8 pci_bus_index[0x8];
>+ u8 parameter_index[0xa];
>+};
>+
> union mlx5_ifc_config_item_type_auto_bits {
> struct mlx5_ifc_configuration_item_type_class_global_bits
> configuration_item_type_class_global;
>+ struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits
>+ configuration_item_type_class_per_host_pf;
> u8 reserved_at_0[0x20];
> };
>
>@@ -45,6 +58,45 @@ struct mlx5_ifc_mnvda_reg_bits {
> u8 configuration_item_data[64][0x20];
> };
>
>+struct mlx5_ifc_nv_global_pci_conf_bits {
>+ u8 sriov_valid[0x1];
>+ u8 reserved_at_1[0x10];
>+ u8 per_pf_total_vf[0x1];
>+ u8 reserved_at_12[0xe];
>+
>+ u8 sriov_en[0x1];
>+ u8 reserved_at_21[0xf];
>+ u8 total_vfs[0x10];
>+
>+ u8 reserved_at_40[0x20];
>+};
>+
>+struct mlx5_ifc_nv_global_pci_cap_bits {
>+ u8 max_vfs_per_pf_valid[0x1];
>+ u8 reserved_at_1[0x13];
>+ u8 per_pf_total_vf_supported[0x1];
>+ u8 reserved_at_15[0xb];
>+
>+ u8 sriov_support[0x1];
>+ u8 reserved_at_21[0xf];
>+ u8 max_vfs_per_pf[0x10];
>+
>+ u8 reserved_at_40[0x60];
>+};
>+
>+struct mlx5_ifc_nv_pf_pci_conf_bits {
>+ u8 reserved_at_0[0x9];
>+ u8 pf_total_vf_en[0x1];
>+ u8 reserved_at_a[0x16];
>+
>+ u8 reserved_at_20[0x20];
>+
>+ u8 reserved_at_40[0x10];
>+ u8 total_vf[0x10];
>+
>+ u8 reserved_at_60[0x20];
>+};
>+
> struct mlx5_ifc_nv_sw_offload_conf_bits {
> u8 ip_over_vxlan_port[0x10];
> u8 tunnel_ecn_copy_offload_disable[0x1];
>@@ -206,7 +258,139 @@ static int mlx5_nv_param_devlink_cqe_compress_set(struct devlink *devlink, u32 i
> return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
> }
>
>+static int
>+mlx5_nv_param_read_global_pci_conf(struct mlx5_core_dev *dev, void *mnvda, size_t len)
>+{
>+ MLX5_SET_CONFIG_ITEM_TYPE(global, mnvda, type_class, 0);
>+ MLX5_SET_CONFIG_ITEM_TYPE(global, mnvda, parameter_index,
>+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF);
>+ MLX5_SET_CONFIG_HDR_LEN(mnvda, nv_global_pci_conf);
>+
>+ return mlx5_nv_param_read(dev, mnvda, len);
>+}
>+
>+static int
>+mlx5_nv_param_read_global_pci_cap(struct mlx5_core_dev *dev, void *mnvda, size_t len)
>+{
>+ MLX5_SET_CONFIG_ITEM_TYPE(global, mnvda, type_class, 0);
>+ MLX5_SET_CONFIG_ITEM_TYPE(global, mnvda, parameter_index,
>+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP);
>+ MLX5_SET_CONFIG_HDR_LEN(mnvda, nv_global_pci_cap);
>+
>+ return mlx5_nv_param_read(dev, mnvda, len);
>+}
>+
>+static int
>+mlx5_nv_param_read_per_host_pf_conf(struct mlx5_core_dev *dev, void *mnvda, size_t len)
>+{
>+ MLX5_SET_CONFIG_ITEM_TYPE(per_host_pf, mnvda, type_class, 3);
>+ MLX5_SET_CONFIG_ITEM_TYPE(per_host_pf, mnvda, parameter_index,
>+ MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF);
>+ MLX5_SET_CONFIG_HDR_LEN(mnvda, nv_pf_pci_conf);
>+
>+ return mlx5_nv_param_read(dev, mnvda, len);
>+}
>+
>+static int mlx5_devlink_enable_sriov_get(struct devlink *devlink, u32 id,
>+ struct devlink_param_gset_ctx *ctx)
>+{
>+ struct mlx5_core_dev *dev = devlink_priv(devlink);
>+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
>+ void *data;
>+ int err;
>+
>+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
>+ if (err)
>+ return err;
>+
>+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
>+ if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
>+ ctx->val.vbool = false;
>+ return 0;
>+ }
>+
>+ memset(mnvda, 0, sizeof(mnvda));
>+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
>+ if (err)
>+ return err;
>+
>+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
>+ if (!MLX5_GET(nv_global_pci_conf, data, per_pf_total_vf)) {
>+ ctx->val.vbool = MLX5_GET(nv_global_pci_conf, data, sriov_en);
>+ return 0;
>+ }
>+
>+ /* SRIOV is per PF */
>+ memset(mnvda, 0, sizeof(mnvda));
>+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
>+ if (err)
>+ return err;
>+
>+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
>+ ctx->val.vbool = MLX5_GET(nv_pf_pci_conf, data, pf_total_vf_en);
>+ return 0;
>+}
>+
>+static int mlx5_devlink_enable_sriov_set(struct devlink *devlink, u32 id,
>+ struct devlink_param_gset_ctx *ctx,
>+ struct netlink_ext_ack *extack)
>+{
>+ struct mlx5_core_dev *dev = devlink_priv(devlink);
>+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
>+ bool per_pf_support;
>+ void *cap, *data;
>+ int err;
>+
>+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
>+ if (err) {
>+ NL_SET_ERR_MSG_MOD(extack, "Failed to read global PCI capability");
>+ return err;
>+ }
>+
>+ cap = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
>+ per_pf_support = MLX5_GET(nv_global_pci_cap, cap, per_pf_total_vf_supported);
>+
>+ if (!MLX5_GET(nv_global_pci_cap, cap, sriov_support)) {
>+ NL_SET_ERR_MSG_MOD(extack, "Not configurable on this device");
>+ return -EOPNOTSUPP;
>+ }
>+
>+ memset(mnvda, 0, sizeof(mnvda));
>+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
>+ if (err) {
>+ NL_SET_ERR_MSG_MOD(extack, "Unable to read global PCI configuration");
>+ return err;
>+ }
>+
>+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
>+ MLX5_SET(nv_global_pci_conf, data, sriov_valid, 1);
>+ MLX5_SET(nv_global_pci_conf, data, sriov_en, ctx->val.vbool);
>+ MLX5_SET(nv_global_pci_conf, data, per_pf_total_vf, per_pf_support);
>+
>+ err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
>+ if (err) {
>+ NL_SET_ERR_MSG_MOD(extack, "Unable to write global PCI configuration");
>+ return err;
>+ }
>+
>+ if (!per_pf_support)
Hmm, given the discussion we have in parallel about some shared-PF
devlink instance, perhaps it would be a good idea to allow only per-PF
configuration here for now and let the "global" per-device configuration
knob be attached to the shared-PF devlink, when/if it lands. What do
you think?
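
Roughly something like this, as an untested sketch against the code
above (the error message wording is just a placeholder):

	/* Refuse the global knob for now, allow per-PF config only. */
	if (!per_pf_support) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only per-PF SRIOV configuration is supported");
		return -EOPNOTSUPP;
	}

Then the per-PF write below stays as is, and the global sriov_en
handling could later attach to the shared-PF devlink instance instead.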
>+ return 0;
>+
>+ /* SRIOV is per PF */
>+ memset(mnvda, 0, sizeof(mnvda));
>+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
>+ if (err) {
>+ NL_SET_ERR_MSG_MOD(extack, "Unable to read per host PF configuration");
>+ return err;
>+ }
>+ MLX5_SET(nv_pf_pci_conf, data, pf_total_vf_en, ctx->val.vbool);
>+ return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
>+}
>+
> static const struct devlink_param mlx5_nv_param_devlink_params[] = {
>+ DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, BIT(DEVLINK_PARAM_CMODE_PERMANENT),
>+ mlx5_devlink_enable_sriov_get,
>+ mlx5_devlink_enable_sriov_set, NULL),
> DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE,
> "cqe_compress_type", DEVLINK_PARAM_TYPE_STRING,
> BIT(DEVLINK_PARAM_CMODE_PERMANENT),
>--
>2.48.1
>
>