Date:   Tue,  7 Feb 2023 16:36:59 -0800
From:   Saeed Mahameed <saeed@...nel.org>
To:     "David S. Miller" <davem@...emloft.net>,
        Jakub Kicinski <kuba@...nel.org>,
        Paolo Abeni <pabeni@...hat.com>,
        Eric Dumazet <edumazet@...gle.com>
Cc:     Saeed Mahameed <saeedm@...dia.com>, netdev@...r.kernel.org,
        Tariq Toukan <tariqt@...dia.com>,
        Shay Drory <shayd@...dia.com>, Moshe Shemesh <moshe@...dia.com>
Subject: [net-next 02/15] net/mlx5: Remove redundant health work lock

From: Shay Drory <shayd@...dia.com>

Commit 90e7cb78b815 ("net/mlx5: fix missing mutex_unlock in
mlx5_fw_fatal_reporter_err_work()") added a second check of
MLX5_DROP_NEW_HEALTH_WORK, performed in the work handler itself under
intf_state_mutex. With that check in place, the earlier check in
mlx5_trigger_health_work() is redundant, and so is the wq_lock
spinlock that protects it.

Remove the lock and rename MLX5_DROP_NEW_HEALTH_WORK to
MLX5_DROP_HEALTH_WORK to reflect these changes.

Signed-off-by: Shay Drory <shayd@...dia.com>
Reviewed-by: Moshe Shemesh <moshe@...dia.com>
Signed-off-by: Saeed Mahameed <saeedm@...dia.com>
---
 .../net/ethernet/mellanox/mlx5/core/health.c  | 28 +++++--------------
 include/linux/mlx5/driver.h                   |  2 --
 2 files changed, 7 insertions(+), 23 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 879555ba847d..1e8bee906c31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -62,7 +62,7 @@ enum {
 };
 
 enum {
-	MLX5_DROP_NEW_HEALTH_WORK,
+	MLX5_DROP_HEALTH_WORK,
 };
 
 enum  {
@@ -675,7 +675,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
 	devlink = priv_to_devlink(dev);
 
 	mutex_lock(&dev->intf_state_mutex);
-	if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
+	if (test_bit(MLX5_DROP_HEALTH_WORK, &health->flags)) {
 		mlx5_core_err(dev, "health works are not permitted at this stage\n");
 		mutex_unlock(&dev->intf_state_mutex);
 		return;
@@ -771,14 +771,8 @@ static unsigned long get_next_poll_jiffies(struct mlx5_core_dev *dev)
 void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
-	unsigned long flags;
 
-	spin_lock_irqsave(&health->wq_lock, flags);
-	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
-		queue_work(health->wq, &health->fatal_report_work);
-	else
-		mlx5_core_err(dev, "new health works are not permitted at this stage\n");
-	spin_unlock_irqrestore(&health->wq_lock, flags);
+	queue_work(health->wq, &health->fatal_report_work);
 }
 
 #define MLX5_MSEC_PER_HOUR (MSEC_PER_SEC * 60 * 60)
@@ -858,7 +852,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
 
 	timer_setup(&health->timer, poll_health, 0);
 	health->fatal_error = MLX5_SENSOR_NO_ERR;
-	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+	clear_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
 	health->health = &dev->iseg->health;
 	health->health_counter = &dev->iseg->health_counter;
 
@@ -869,13 +863,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
-	unsigned long flags;
 
-	if (disable_health) {
-		spin_lock_irqsave(&health->wq_lock, flags);
-		set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
-		spin_unlock_irqrestore(&health->wq_lock, flags);
-	}
+	if (disable_health)
+		set_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
 
 	del_timer_sync(&health->timer);
 }
@@ -891,11 +881,8 @@ void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev)
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
-	unsigned long flags;
 
-	spin_lock_irqsave(&health->wq_lock, flags);
-	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
-	spin_unlock_irqrestore(&health->wq_lock, flags);
+	set_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
 	cancel_delayed_work_sync(&health->update_fw_log_ts_work);
 	cancel_work_sync(&health->report_work);
 	cancel_work_sync(&health->fatal_report_work);
@@ -928,7 +915,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
 	kfree(name);
 	if (!health->wq)
 		goto out_err;
-	spin_lock_init(&health->wq_lock);
 	INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
 	INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
 	INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index cd529e051b4d..91e8160ed087 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -430,8 +430,6 @@ struct mlx5_core_health {
 	u8				synd;
 	u32				fatal_error;
 	u32				crdump_size;
-	/* wq spinlock to synchronize draining */
-	spinlock_t			wq_lock;
 	struct workqueue_struct	       *wq;
 	unsigned long			flags;
 	struct work_struct		fatal_report_work;
-- 
2.39.1
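
For reference, the scheme the patch converges on is: the submit side
queues the work unconditionally, the work handler checks the drop flag
under intf_state_mutex before doing anything, and the drain path sets
the flag and then waits synchronously via cancel_work_sync(). Below is
a minimal userspace C sketch of that pattern, not mlx5 code; pthreads
and C11 atomics stand in for the workqueue machinery, and all
identifiers are illustrative only.

/*
 * Userspace sketch of the synchronization pattern the patch leaves
 * behind. Illustrative names, not mlx5 symbols.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool drop_health_work;	/* stands in for MLX5_DROP_HEALTH_WORK */
static pthread_mutex_t intf_state_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Work handler: bail out under the state mutex if draining has started. */
static void *fatal_report_work(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&intf_state_mutex);
	if (atomic_load(&drop_health_work)) {
		fprintf(stderr, "health works are not permitted at this stage\n");
		pthread_mutex_unlock(&intf_state_mutex);
		return NULL;
	}
	/* ... report / recover while holding the mutex ... */
	pthread_mutex_unlock(&intf_state_mutex);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	/* Submit side ("queue_work"): no flag check, no spinlock. */
	pthread_create(&worker, NULL, fatal_report_work, NULL);

	/*
	 * Drain side: set the flag, then wait for in-flight work to
	 * finish, as mlx5_drain_health_wq() does with cancel_work_sync().
	 */
	atomic_store(&drop_health_work, true);
	pthread_join(&worker, NULL);

	return 0;
}

The drain path needs no lock of its own because the only ordering that
matters is "flag visible before we wait", and the synchronous cancel
already provides that.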
