Message-Id: <20200318134857.1003018-9-idosch@idosch.org>
Date:   Wed, 18 Mar 2020 15:48:56 +0200
From:   Ido Schimmel <idosch@...sch.org>
To:     netdev@...r.kernel.org
Cc:     davem@...emloft.net, kuba@...nel.org, jiri@...lanox.com,
        mlxsw@...lanox.com, Ido Schimmel <idosch@...lanox.com>
Subject: [PATCH net-next 8/9] mlxsw: spectrum_cnt: Expose devlink resource occupancy for counters

From: Jiri Pirko <jiri@...lanox.com>

Implement occupancy counting for the counter pool and its sub-pools and
expose the occupancy over the devlink resource API.
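
For context (not part of the change itself), the pattern the patch
follows is: keep an atomic occupancy counter next to each resource,
register a getter callback with devlink that reads it, and adjust the
counter on every allocation and free. A minimal sketch of that pattern,
using hypothetical names (FOO_RESOURCE_ID, foo_occ, foo_occ_get, and
the foo_* helpers below are illustration only, not part of mlxsw):

	#include <linux/atomic.h>
	#include <net/devlink.h>

	/* Hypothetical resource id; a real driver defines its own. */
	#define FOO_RESOURCE_ID 1

	static atomic_t foo_occ = ATOMIC_INIT(0);

	/* Called by devlink whenever user space queries occupancy. */
	static u64 foo_occ_get(void *priv)
	{
		return atomic_read(&foo_occ);
	}

	/* At init time, after the resource size has been fetched. */
	static void foo_occ_init(struct devlink *devlink)
	{
		devlink_resource_occ_get_register(devlink, FOO_RESOURCE_ID,
						  foo_occ_get, NULL);
	}

	/* At fini time, before the resource is torn down. */
	static void foo_occ_fini(struct devlink *devlink)
	{
		devlink_resource_occ_get_unregister(devlink,
						    FOO_RESOURCE_ID);
	}

	/* Callers account every allocation / free of entry_size entries. */
	static void foo_occ_alloc_account(unsigned int entry_size)
	{
		atomic_add(entry_size, &foo_occ);
	}

	static void foo_occ_free_account(unsigned int entry_size)
	{
		atomic_sub(entry_size, &foo_occ);
	}

With a getter registered, the occupancy shows up as the "occ" value in
"devlink resource show DEV" output.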

Signed-off-by: Jiri Pirko <jiri@...lanox.com>
Signed-off-by: Ido Schimmel <idosch@...lanox.com>
---
 .../ethernet/mellanox/mlxsw/spectrum_cnt.c    | 63 ++++++++++++++++++-
 1 file changed, 62 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 417c512bc7a2..0268f0a6662a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -15,12 +15,14 @@ struct mlxsw_sp_counter_sub_pool {
 	u64 resource_id; /* devlink resource id */
 	unsigned int entry_size;
 	unsigned int bank_count;
+	atomic_t active_entries_count;
 };
 
 struct mlxsw_sp_counter_pool {
 	u64 pool_size;
 	unsigned long *usage; /* Usage bitmap */
 	spinlock_t counter_pool_lock; /* Protects counter pool allocations */
+	atomic_t active_entries_count;
 	unsigned int sub_pools_count;
 	struct mlxsw_sp_counter_sub_pool sub_pools[];
 };
@@ -40,6 +42,13 @@ static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
 	}
 };
 
+static u64 mlxsw_sp_counter_sub_pool_occ_get(void *priv)
+{
+	const struct mlxsw_sp_counter_sub_pool *sub_pool = priv;
+
+	return atomic_read(&sub_pool->active_entries_count);
+}
+
 static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
@@ -62,12 +71,50 @@ static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
 						sub_pool->resource_id,
 						&sub_pool->size);
 		if (err)
-			return err;
+			goto err_resource_size_get;
+
+		devlink_resource_occ_get_register(devlink,
+						  sub_pool->resource_id,
+						  mlxsw_sp_counter_sub_pool_occ_get,
+						  sub_pool);
 
 		sub_pool->base_index = base_index;
 		base_index += sub_pool->size;
+		atomic_set(&sub_pool->active_entries_count, 0);
 	}
 	return 0;
+
+err_resource_size_get:
+	for (i--; i >= 0; i--) {
+		sub_pool = &pool->sub_pools[i];
+
+		devlink_resource_occ_get_unregister(devlink,
+						    sub_pool->resource_id);
+	}
+	return err;
+}
+
+static void mlxsw_sp_counter_sub_pools_fini(struct mlxsw_sp *mlxsw_sp)
+{
+	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+	struct mlxsw_sp_counter_sub_pool *sub_pool;
+	int i;
+
+	for (i = 0; i < pool->sub_pools_count; i++) {
+		sub_pool = &pool->sub_pools[i];
+
+		WARN_ON(atomic_read(&sub_pool->active_entries_count));
+		devlink_resource_occ_get_unregister(devlink,
+						    sub_pool->resource_id);
+	}
+}
+
+static u64 mlxsw_sp_counter_pool_occ_get(void *priv)
+{
+	const struct mlxsw_sp_counter_pool *pool = priv;
+
+	return atomic_read(&pool->active_entries_count);
 }
 
 int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
@@ -88,11 +135,14 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
 	       sub_pools_count * sizeof(*sub_pool));
 	pool->sub_pools_count = sub_pools_count;
 	spin_lock_init(&pool->counter_pool_lock);
+	atomic_set(&pool->active_entries_count, 0);
 
 	err = devlink_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
 					&pool->pool_size);
 	if (err)
 		goto err_pool_resource_size_get;
+	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+					  mlxsw_sp_counter_pool_occ_get, pool);
 
 	map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
 
@@ -111,6 +161,8 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
 err_sub_pools_init:
 	kfree(pool->usage);
 err_usage_alloc:
+	devlink_resource_occ_get_unregister(devlink,
+					    MLXSW_SP_RESOURCE_COUNTERS);
 err_pool_resource_size_get:
 	kfree(pool);
 	return err;
@@ -119,10 +171,15 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
 void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
 {
 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
 
+	mlxsw_sp_counter_sub_pools_fini(mlxsw_sp);
 	WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
 			       pool->pool_size);
+	WARN_ON(atomic_read(&pool->active_entries_count));
 	kfree(pool->usage);
+	devlink_resource_occ_get_unregister(devlink,
+					    MLXSW_SP_RESOURCE_COUNTERS);
 	kfree(pool);
 }
 
@@ -158,6 +215,8 @@ int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
 	spin_unlock(&pool->counter_pool_lock);
 
 	*p_counter_index = entry_index;
+	atomic_add(sub_pool->entry_size, &sub_pool->active_entries_count);
+	atomic_add(sub_pool->entry_size, &pool->active_entries_count);
 	return 0;
 
 err_alloc:
@@ -180,6 +239,8 @@ void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
 	for (i = 0; i < sub_pool->entry_size; i++)
 		__clear_bit(counter_index + i, pool->usage);
 	spin_unlock(&pool->counter_pool_lock);
+	atomic_sub(sub_pool->entry_size, &sub_pool->active_entries_count);
+	atomic_sub(sub_pool->entry_size, &pool->active_entries_count);
 }
 
 int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
-- 
2.24.1
