Message-Id: <20200220070800.364235-3-idosch@idosch.org>
Date:   Thu, 20 Feb 2020 09:07:47 +0200
From:   Ido Schimmel <idosch@...sch.org>
To:     netdev@...r.kernel.org
Cc:     davem@...emloft.net, jiri@...lanox.com, mlxsw@...lanox.com,
        Ido Schimmel <idosch@...lanox.com>
Subject: [PATCH net-next 02/15] mlxsw: spectrum: Protect counter pool with a lock

From: Ido Schimmel <idosch@...lanox.com>

The counter pool is a shared resource. It is used, for example, by the
ACL code to allocate counters for actions and by the routing code to
allocate counters for adjacency entries.

Currently, all allocations are protected by RTNL, but this is going to
change with the removal of RTNL from the routing code.

Therefore, protect counter allocations with a spin lock.
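
Both the allocation and the free paths now take the pool's spin lock
around the bitmap operations, so the find/claim sequence stays atomic
and the non-atomic __set_bit()/__clear_bit() helpers remain safe under
the lock. A minimal sketch of the resulting shape, condensed from the
diff below (the struct and function names here are illustrative, not
the driver's):

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/errno.h>

/* Illustrative pool; the driver's equivalent is
 * struct mlxsw_sp_counter_pool.
 */
struct pool {
	spinlock_t lock;	/* protects @usage */
	unsigned long *usage;	/* allocation bitmap */
	unsigned int size;	/* number of bits in @usage */
};

static int pool_alloc(struct pool *pool, unsigned int *p_index)
{
	unsigned int bit;
	int err;

	spin_lock(&pool->lock);
	/* Search and claim must happen in one critical section, or
	 * two callers may find the same free bit.
	 */
	bit = find_first_zero_bit(pool->usage, pool->size);
	if (bit == pool->size) {
		err = -ENOBUFS;
		goto err_alloc;
	}
	/* Non-atomic variant is fine while holding the lock. */
	__set_bit(bit, pool->usage);
	spin_unlock(&pool->lock);

	*p_index = bit;
	return 0;

err_alloc:
	spin_unlock(&pool->lock);
	return err;
}

static void pool_free(struct pool *pool, unsigned int index)
{
	spin_lock(&pool->lock);
	__clear_bit(index, pool->usage);
	spin_unlock(&pool->lock);
}

The plain spin_lock() variant is used, presumably because all callers
run in process context; a caller in softirq or hardirq context would
need the _bh or _irqsave variants instead.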

Signed-off-by: Ido Schimmel <idosch@...lanox.com>
Acked-by: Jiri Pirko <jiri@...lanox.com>
---
 .../ethernet/mellanox/mlxsw/spectrum_cnt.c    | 25 +++++++++++++++----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 83c2e1e5f216..6a02ef9ec00e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -3,6 +3,7 @@
 
 #include <linux/kernel.h>
 #include <linux/bitops.h>
+#include <linux/spinlock.h>
 
 #include "spectrum_cnt.h"
 
@@ -18,6 +19,7 @@ struct mlxsw_sp_counter_sub_pool {
 struct mlxsw_sp_counter_pool {
 	unsigned int pool_size;
 	unsigned long *usage; /* Usage bitmap */
+	spinlock_t counter_pool_lock; /* Protects counter pool allocations */
 	struct mlxsw_sp_counter_sub_pool *sub_pools;
 };
 
@@ -87,6 +89,7 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 	if (!pool)
 		return -ENOMEM;
+	spin_lock_init(&pool->counter_pool_lock);
 
 	pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
 	map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
@@ -139,25 +142,35 @@ int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
 	struct mlxsw_sp_counter_sub_pool *sub_pool;
 	unsigned int entry_index;
 	unsigned int stop_index;
-	int i;
+	int i, err;
 
 	sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
 	stop_index = sub_pool->base_index + sub_pool->size;
 	entry_index = sub_pool->base_index;
 
+	spin_lock(&pool->counter_pool_lock);
 	entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
-	if (entry_index == stop_index)
-		return -ENOBUFS;
+	if (entry_index == stop_index) {
+		err = -ENOBUFS;
+		goto err_alloc;
+	}
 	/* The sub-pools can contain non-integer number of entries
 	 * so we must check for overflow
 	 */
-	if (entry_index + sub_pool->entry_size > stop_index)
-		return -ENOBUFS;
+	if (entry_index + sub_pool->entry_size > stop_index) {
+		err = -ENOBUFS;
+		goto err_alloc;
+	}
 	for (i = 0; i < sub_pool->entry_size; i++)
 		__set_bit(entry_index + i, pool->usage);
+	spin_unlock(&pool->counter_pool_lock);
 
 	*p_counter_index = entry_index;
 	return 0;
+
+err_alloc:
+	spin_unlock(&pool->counter_pool_lock);
+	return err;
 }
 
 void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
@@ -171,6 +184,8 @@ void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
 	if (WARN_ON(counter_index >= pool->pool_size))
 		return;
 	sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+	spin_lock(&pool->counter_pool_lock);
 	for (i = 0; i < sub_pool->entry_size; i++)
 		__clear_bit(counter_index + i, pool->usage);
+	spin_unlock(&pool->counter_pool_lock);
 }
-- 
2.24.1
