Message-Id: <20200318134857.1003018-4-idosch@idosch.org>
Date: Wed, 18 Mar 2020 15:48:51 +0200
From: Ido Schimmel <idosch@...sch.org>
To: netdev@...r.kernel.org
Cc: davem@...emloft.net, kuba@...nel.org, jiri@...lanox.com,
mlxsw@...lanox.com, Ido Schimmel <idosch@...lanox.com>
Subject: [PATCH net-next 3/9] mlxsw: spectrum_cnt: Move sub_pools under per-instance pool struct
From: Jiri Pirko <jiri@...lanox.com>

Currently, a single global static array of sub-pools is shared by all
driver instances. Make the sub-pools per-instance, since multiple
instances of the mlxsw driver can have different values.

Signed-off-by: Jiri Pirko <jiri@...lanox.com>
Signed-off-by: Ido Schimmel <idosch@...lanox.com>
---
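
Note: the sketch below is not part of the patch. It is a minimal,
standalone C illustration of the pattern the patch adopts: a per-instance
structure that ends in a flexible array member, sized up front and seeded
from a const template array. The names (counter_pool, sub_pool,
pool_create, sub_pool_defaults) are illustrative only; the kernel code
uses kzalloc() with struct_size(), whereas this sketch uses calloc() and
an offsetof()-based size so it compiles in plain userspace C.

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct sub_pool {
		unsigned int bank_count;
		unsigned int entry_size;
	};

	struct counter_pool {
		unsigned int sub_pools_count;
		struct sub_pool sub_pools[];	/* flexible array member */
	};

	/* Const template, analogous to mlxsw_sp_counter_sub_pools[]. */
	static const struct sub_pool sub_pool_defaults[] = {
		{ .bank_count = 6 },
		{ .bank_count = 2 },
	};

	static struct counter_pool *pool_create(void)
	{
		size_t n = sizeof(sub_pool_defaults) / sizeof(sub_pool_defaults[0]);
		struct counter_pool *pool;

		/* Equivalent of kzalloc(struct_size(pool, sub_pools, n), ...). */
		pool = calloc(1, offsetof(struct counter_pool, sub_pools) +
				 n * sizeof(struct sub_pool));
		if (!pool)
			return NULL;

		/* Seed the per-instance copy; each instance may now diverge. */
		memcpy(pool->sub_pools, sub_pool_defaults,
		       n * sizeof(struct sub_pool));
		pool->sub_pools_count = (unsigned int)n;
		return pool;
	}

	int main(void)
	{
		struct counter_pool *pool = pool_create();

		if (!pool)
			return 1;
		printf("sub-pools: %u, first bank_count: %u\n",
		       pool->sub_pools_count, pool->sub_pools[0].bank_count);
		free(pool);
		return 0;
	}
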
.../ethernet/mellanox/mlxsw/spectrum_cnt.c | 45 +++++++++++--------
1 file changed, 27 insertions(+), 18 deletions(-)
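
A second side note, also not part of the patch: because the pool is now
allocated before validation and sub-pool preparation, the early
"return err" paths in mlxsw_sp_counter_pool_init() become goto unwinds
so the allocation is freed on every failure path. Below is a hedged,
standalone sketch of that allocate-first/goto-unwind shape; the function
and label names are generic placeholders, not the driver's.

	#include <errno.h>
	#include <stdlib.h>

	struct object { int ready; };

	/* Stubs standing in for validation/preparation steps. */
	static int validate_object(struct object *obj) { (void)obj; return 0; }
	static int prepare_object(struct object *obj) { obj->ready = 1; return 0; }

	static int object_init(struct object **out)
	{
		struct object *obj;
		int err;

		obj = calloc(1, sizeof(*obj));	/* allocation happens first now */
		if (!obj)
			return -ENOMEM;

		err = validate_object(obj);
		if (err)
			goto err_validate;

		err = prepare_object(obj);
		if (err)
			goto err_prepare;

		*out = obj;
		return 0;

	err_prepare:
	err_validate:
		free(obj);			/* single cleanup point on failure */
		return err;
	}

	int main(void)
	{
		struct object *obj = NULL;

		if (object_init(&obj))
			return 1;
		free(obj);
		return 0;
	}
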
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 37811181586a..4cdabde47dd0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -18,10 +18,11 @@ struct mlxsw_sp_counter_pool {
unsigned int pool_size;
unsigned long *usage; /* Usage bitmap */
spinlock_t counter_pool_lock; /* Protects counter pool allocations */
- struct mlxsw_sp_counter_sub_pool *sub_pools;
+ unsigned int sub_pools_count;
+ struct mlxsw_sp_counter_sub_pool sub_pools[];
};
-static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
+static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
[MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
.bank_count = 6,
},
@@ -32,6 +33,7 @@ static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
unsigned int total_bank_config = 0;
unsigned int pool_size;
unsigned int bank_size;
@@ -40,8 +42,8 @@ static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
bank_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_BANK_SIZE);
/* Check config is valid, no bank over subscription */
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++)
- total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count;
+ for (i = 0; i < pool->sub_pools_count; i++)
+ total_bank_config += pool->sub_pools[i].bank_count;
if (total_bank_config > pool_size / bank_size + 1)
return -EINVAL;
return 0;
@@ -49,16 +51,17 @@ static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
struct mlxsw_sp_counter_sub_pool *sub_pool;
/* Prepare generic flow pool*/
- sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
+ sub_pool = &pool->sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES))
return -EIO;
sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
COUNTER_SIZE_PACKETS_BYTES);
/* Prepare erif pool*/
- sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_RIF];
+ sub_pool = &pool->sub_pools[MLXSW_SP_COUNTER_SUB_POOL_RIF];
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_ROUTER_BASIC))
return -EIO;
sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
@@ -68,6 +71,7 @@ static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
{
+ unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools);
struct mlxsw_sp_counter_sub_pool *sub_pool;
struct mlxsw_sp_counter_pool *pool;
unsigned int base_index;
@@ -80,18 +84,23 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_BANK_SIZE))
return -EIO;
+ pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count),
+ GFP_KERNEL);
+ if (!pool)
+ return -ENOMEM;
+ mlxsw_sp->counter_pool = pool;
+ memcpy(pool->sub_pools, mlxsw_sp_counter_sub_pools,
+ sub_pools_count * sizeof(*sub_pool));
+ pool->sub_pools_count = sub_pools_count;
+ spin_lock_init(&pool->counter_pool_lock);
+
err = mlxsw_sp_counter_pool_validate(mlxsw_sp);
if (err)
- return err;
+ goto err_pool_validate;
err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp);
if (err)
- return err;
-
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (!pool)
- return -ENOMEM;
- spin_lock_init(&pool->counter_pool_lock);
+ goto err_sub_pools_prepare;
pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
@@ -104,12 +113,11 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
goto err_usage_alloc;
}
- pool->sub_pools = mlxsw_sp_counter_sub_pools;
/* Allocation is based on bank count which should be
* specified for each sub pool statically.
*/
base_index = 0;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
+ for (i = 0; i < pool->sub_pools_count; i++) {
sub_pool = &pool->sub_pools[i];
sub_pool->size = sub_pool->bank_count * bank_size;
sub_pool->base_index = base_index;
@@ -119,10 +127,11 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
sub_pool->size = pool->pool_size - sub_pool->base_index;
}
- mlxsw_sp->counter_pool = pool;
return 0;
err_usage_alloc:
+err_sub_pools_prepare:
+err_pool_validate:
kfree(pool);
return err;
}
@@ -147,7 +156,7 @@ int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int stop_index;
int i, err;
- sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+ sub_pool = &pool->sub_pools[sub_pool_id];
stop_index = sub_pool->base_index + sub_pool->size;
entry_index = sub_pool->base_index;
@@ -186,7 +195,7 @@ void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(counter_index >= pool->pool_size))
return;
- sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+ sub_pool = &pool->sub_pools[sub_pool_id];
spin_lock(&pool->counter_pool_lock);
for (i = 0; i < sub_pool->entry_size; i++)
__clear_bit(counter_index + i, pool->usage);
--
2.24.1