Message-ID: <1658941416-74393-8-git-send-email-moshe@nvidia.com>
Date: Wed, 27 Jul 2022 20:03:34 +0300
From: Moshe Shemesh <moshe@...dia.com>
To: "David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Saeed Mahameed <saeedm@...dia.com>,
Leon Romanovsky <leon@...nel.org>,
"Tariq Toukan" <tariqt@...dia.com>,
Eric Dumazet <edumazet@...gle.com>,
Paolo Abeni <pabeni@...hat.com>
CC: Jiri Pirko <jiri@...dia.com>, <netdev@...r.kernel.org>,
Moshe Shemesh <moshe@...dia.com>
Subject: [PATCH net-next 7/9] net/mlx4: Lock mlx4 devlink reload callback
Change devlink instance locks in the mlx4 driver to have the devlink
reload callback locked, while keeping all driver paths which lead to
devl_ API functions called by the mlx4 driver locked.
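
For reference, a condensed sketch (illustration only, not part of the
applied diff; names suffixed _sketch are hypothetical) of the locking
scheme this patch establishes: the reload callbacks take the devlink
instance lock themselves, while mlx4_load_one()/mlx4_unload_one() only
assert that their caller already holds it.

  #include <net/devlink.h>

  /* Reload path: the callback takes the instance lock around teardown. */
  static int mlx4_devlink_reload_down_sketch(struct devlink *devlink)
  {
          devl_lock(devlink);
          /* e.g. mlx4_restart_one_down(persist->pdev); */
          devl_unlock(devlink);
          return 0;
  }

  /* Load/unload paths: callers (probe, remove, error handlers, PM hooks)
   * hold the lock, so the internal devl_lock()/devl_unlock() pairs are
   * replaced by an assertion.
   */
  static void mlx4_unload_one_sketch(struct devlink *devlink)
  {
          devl_assert_locked(devlink);
          /* port cleanup and other devl_ API calls run under the caller's lock */
  }
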
Signed-off-by: Moshe Shemesh <moshe@...dia.com>
Reviewed-by: Tariq Toukan <tariqt@...dia.com>
Reviewed-by: Jiri Pirko <jiri@...dia.com>
---
drivers/net/ethernet/mellanox/mlx4/catas.c | 5 +++
drivers/net/ethernet/mellanox/mlx4/main.c | 45 +++++++++++++++-------
2 files changed, 37 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 5b11557f1ae4..0eb7b83637d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -204,9 +204,13 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
{
+ struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
int err = 0;
mlx4_enter_error_state(persist);
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
!(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
@@ -215,6 +219,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
err);
}
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
}
static void dump_err_buf(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 0166d003f22c..2c764d1d897d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3342,6 +3342,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
struct mlx4_dev_cap *dev_cap = NULL;
int existing_vfs = 0;
+ devl_assert_locked(devlink);
dev = &priv->dev;
INIT_LIST_HEAD(&priv->ctx_list);
@@ -3630,7 +3631,6 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
}
}
- devl_lock(devlink);
for (port = 1; port <= dev->caps.num_ports; port++) {
err = mlx4_init_port_info(dev, port);
if (err)
@@ -3644,7 +3644,6 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
if (err)
goto err_port;
- devl_unlock(devlink);
mlx4_request_modules(dev);
mlx4_sense_init(dev);
@@ -3661,7 +3660,6 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
err_port:
for (--port; port >= 1; --port)
mlx4_cleanup_port_info(&priv->port[port]);
- devl_unlock(devlink);
mlx4_cleanup_default_counters(dev);
if (!mlx4_is_slave(dev))
@@ -3736,7 +3734,6 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
- struct devlink *devlink = priv_to_devlink(priv);
unsigned total_vfs = 0;
unsigned int i;
@@ -3849,9 +3846,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
}
}
- devl_lock(devlink);
err = mlx4_crdump_init(&priv->dev);
- devl_unlock(devlink);
if (err)
goto err_release_regions;
@@ -3869,9 +3864,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
mlx4_catas_end(&priv->dev);
err_crdump:
- devl_lock(devlink);
mlx4_crdump_end(&priv->dev);
- devl_unlock(devlink);
err_release_regions:
pci_release_regions(pdev);
@@ -3965,9 +3958,11 @@ static int mlx4_devlink_reload_down(struct devlink *devlink, bool netns_change,
NL_SET_ERR_MSG_MOD(extack, "Namespace change is not supported");
return -EOPNOTSUPP;
}
+ devl_lock(devlink);
if (persist->num_vfs)
mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
mlx4_restart_one_down(persist->pdev);
+ devl_unlock(devlink);
return 0;
}
@@ -3980,8 +3975,10 @@ static int mlx4_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
struct mlx4_dev_persistent *persist = dev->persist;
int err;
+ devl_lock(devlink);
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
err = mlx4_restart_one_up(persist->pdev, true, devlink);
+ devl_unlock(devlink);
if (err)
mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n",
err);
@@ -4008,6 +4005,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
if (!devlink)
return -ENOMEM;
+ devl_lock(devlink);
priv = devlink_priv(devlink);
dev = &priv->dev;
@@ -4035,6 +4033,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_save_state(pdev);
devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devl_unlock(devlink);
devlink_register(devlink);
return 0;
@@ -4044,6 +4043,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
err_devlink_unregister:
kfree(dev->persist);
err_devlink_free:
+ devl_unlock(devlink);
devlink_free(devlink);
return ret;
}
@@ -4069,6 +4069,7 @@ static void mlx4_unload_one(struct pci_dev *pdev)
int p, i;
devlink = priv_to_devlink(priv);
+ devl_assert_locked(devlink);
if (priv->removed)
return;
@@ -4084,12 +4085,10 @@ static void mlx4_unload_one(struct pci_dev *pdev)
mlx4_stop_sense(dev);
mlx4_unregister_device(dev);
- devl_lock(devlink);
for (p = 1; p <= dev->caps.num_ports; p++) {
mlx4_cleanup_port_info(&priv->port[p]);
mlx4_CLOSE_PORT(dev, p);
}
- devl_unlock(devlink);
if (mlx4_is_master(dev))
mlx4_free_resource_tracker(dev,
@@ -4150,6 +4149,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
devlink_unregister(devlink);
+ devl_lock(devlink);
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
@@ -4174,9 +4174,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
else
mlx4_info(dev, "%s: interface is down\n", __func__);
mlx4_catas_end(dev);
- devl_lock(devlink);
mlx4_crdump_end(dev);
- devl_unlock(devlink);
if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
mlx4_warn(dev, "Disabling SR-IOV\n");
pci_disable_sriov(pdev);
@@ -4187,6 +4185,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
devlink_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
kfree(dev->persist);
+ devl_unlock(devlink);
devlink_free(devlink);
}
@@ -4307,15 +4306,20 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
mlx4_enter_error_state(persist);
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4348,6 +4352,7 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ struct devlink *devlink;
int total_vfs;
int err;
@@ -4355,6 +4360,8 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+ devlink = priv_to_devlink(priv);
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
@@ -4373,19 +4380,23 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
}
end:
mutex_unlock(&persist->interface_state_mutex);
-
+ devl_unlock(devlink);
}
static void mlx4_shutdown(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
mlx4_pci_disable_device(dev);
}
@@ -4400,12 +4411,16 @@ static int __maybe_unused mlx4_suspend(struct device *dev_d)
struct pci_dev *pdev = to_pci_dev(dev_d);
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_err(dev, "suspend was called\n");
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
return 0;
}
@@ -4417,6 +4432,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ struct devlink *devlink;
int total_vfs;
int ret = 0;
@@ -4424,6 +4440,8 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+ devlink = priv_to_devlink(priv);
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
@@ -4437,6 +4455,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
}
}
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
return ret;
}
--
2.18.2