Message-ID: <1744312662-356571-7-git-send-email-tariqt@nvidia.com>
Date: Thu, 10 Apr 2025 22:17:36 +0300
From: Tariq Toukan <tariqt@...dia.com>
To: "David S. Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
	Paolo Abeni <pabeni@...hat.com>, Eric Dumazet <edumazet@...gle.com>, "Andrew
 Lunn" <andrew+netdev@...n.ch>
CC: Gal Pressman <gal@...dia.com>, Leon Romanovsky <leonro@...dia.com>, "Saeed
 Mahameed" <saeedm@...dia.com>, Leon Romanovsky <leon@...nel.org>, Tariq
 Toukan <tariqt@...dia.com>, <netdev@...r.kernel.org>,
	<linux-rdma@...r.kernel.org>, <linux-kernel@...r.kernel.org>, Moshe Shemesh
	<moshe@...dia.com>, Mark Bloch <mbloch@...dia.com>, Vlad Dogaru
	<vdogaru@...dia.com>, Yevgeny Kliteynik <kliteyn@...dia.com>, Michal Kubiak
	<michal.kubiak@...el.com>
Subject: [PATCH net-next V2 06/12] net/mlx5: HWS, Add fullness tracking to pool

From: Vlad Dogaru <vdogaru@...dia.com>

Track the number of available elements in each pool, so that future users
can cheaply query whether a pool is empty (no free elements left) or full
(no elements in use).

Signed-off-by: Vlad Dogaru <vdogaru@...dia.com>
Reviewed-by: Yevgeny Kliteynik <kliteyn@...dia.com>
Reviewed-by: Mark Bloch <mbloch@...dia.com>
Signed-off-by: Tariq Toukan <tariqt@...dia.com>
Reviewed-by: Michal Kubiak <michal.kubiak@...el.com>
---
 .../mellanox/mlx5/core/steering/hws/pool.c    |  7 ++++++
 .../mellanox/mlx5/core/steering/hws/pool.h    | 25 +++++++++++++++++++
 2 files changed, 32 insertions(+)
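
For context, a rough sketch of how a later patch might consume the new
helpers. hws_grow_pool() and hws_release_resources() are made-up
placeholders for future logic, not functions from this series:

#include "pool.h"	/* mlx5hws_pool_empty()/mlx5hws_pool_full() */

/* Placeholders for logic that a later patch would provide. */
int hws_grow_pool(struct mlx5hws_pool *pool);
void hws_release_resources(struct mlx5hws_pool *pool);

/* Hypothetical caller: grow the pool only once it has been exhausted.
 * A chunk of order N consumes 1 << N elements, so available_elems
 * reaches zero when every element has been handed out.
 */
static int hws_ensure_capacity(struct mlx5hws_pool *pool)
{
	if (!mlx5hws_pool_empty(pool))
		return 0;	/* free elements remain, nothing to do */

	return hws_grow_pool(pool);	/* placeholder resize logic */
}

/* Hypothetical caller: release backing resources once every chunk has
 * been returned and the pool is back at its full capacity.
 */
static void hws_try_release(struct mlx5hws_pool *pool)
{
	if (mlx5hws_pool_full(pool))
		hws_release_resources(pool);	/* placeholder, not in this series */
}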

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
index 270b333faab3..26d85fe3c417 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
@@ -324,6 +324,8 @@ int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
 
 	mutex_lock(&pool->lock);
 	ret = pool->p_get_chunk(pool, chunk);
+	if (ret == 0)
+		pool->available_elems -= 1 << chunk->order;
 	mutex_unlock(&pool->lock);
 
 	return ret;
@@ -334,6 +336,7 @@ void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
 {
 	mutex_lock(&pool->lock);
 	pool->p_put_chunk(pool, chunk);
+	pool->available_elems += 1 << chunk->order;
 	mutex_unlock(&pool->lock);
 }
 
@@ -360,6 +363,7 @@ mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_
 		res_db_type = MLX5HWS_POOL_DB_TYPE_BITMAP;
 
 	pool->alloc_log_sz = pool_attr->alloc_log_sz;
+	pool->available_elems = 1 << pool_attr->alloc_log_sz;
 
 	if (hws_pool_db_init(pool, res_db_type))
 		goto free_pool;
@@ -377,6 +381,9 @@ void mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
 {
 	mutex_destroy(&pool->lock);
 
+	if (pool->available_elems != 1 << pool->alloc_log_sz)
+		mlx5hws_err(pool->ctx, "Attempting to destroy non-empty pool\n");
+
 	if (pool->resource)
 		hws_pool_resource_free(pool);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
index 9a781a87f097..c82760d53e1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
@@ -71,6 +71,7 @@ struct mlx5hws_pool {
 	enum mlx5hws_pool_flags flags;
 	struct mutex lock; /* protect the pool */
 	size_t alloc_log_sz;
+	size_t available_elems;
 	enum mlx5hws_table_type tbl_type;
 	enum mlx5hws_pool_optimize opt_type;
 	struct mlx5hws_pool_resource *resource;
@@ -103,4 +104,28 @@ static inline u32 mlx5hws_pool_get_base_mirror_id(struct mlx5hws_pool *pool)
 {
 	return pool->mirror_resource->base_id;
 }
+
+static inline bool
+mlx5hws_pool_empty(struct mlx5hws_pool *pool)
+{
+	bool ret;
+
+	mutex_lock(&pool->lock);
+	ret = pool->available_elems == 0;
+	mutex_unlock(&pool->lock);
+
+	return ret;
+}
+
+static inline bool
+mlx5hws_pool_full(struct mlx5hws_pool *pool)
+{
+	bool ret;
+
+	mutex_lock(&pool->lock);
+	ret = pool->available_elems == (1 << pool->alloc_log_sz);
+	mutex_unlock(&pool->lock);
+
+	return ret;
+}
 #endif /* MLX5HWS_POOL_H_ */
-- 
2.31.1

