Message-ID: <20241001103709.58127-3-tariqt@nvidia.com>
Date: Tue, 1 Oct 2024 13:37:05 +0300
From: Tariq Toukan <tariqt@...dia.com>
To: "David S. Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, Eric Dumazet <edumazet@...gle.com>
CC: <netdev@...r.kernel.org>, Saeed Mahameed <saeedm@...dia.com>, Gal Pressman
<gal@...dia.com>, Leon Romanovsky <leonro@...dia.com>, Simon Horman
<horms@...nel.org>, Cosmin Ratiu <cratiu@...dia.com>, Tariq Toukan
<tariqt@...dia.com>
Subject: [PATCH net-next V2 2/6] net/mlx5: hw counters: Use kvmalloc for bulk query buffer
From: Cosmin Ratiu <cratiu@...dia.com>
The bulk query buffer starts out small (see [1]) and, as soon as the
number of counters exceeds the initial threshold, grows to the maximum
size (32K entries, 512KB), with a retry scheme for the allocation.

This commit switches the buffer allocation to kvmalloc, which has a
near-zero likelihood of failing, so the explicit retry scheme becomes
superfluous and is removed. In the unlikely event that the allocation
does fail, it is retried every sampling_interval, when the wq task
runs.

[1] commit b247f32aecad ("net/mlx5: Dynamically resize flow counters
query buffer")
Signed-off-by: Cosmin Ratiu <cratiu@...dia.com>
Signed-off-by: Tariq Toukan <tariqt@...dia.com>
---
.../ethernet/mellanox/mlx5/core/fs_counters.c | 59 +++++++------------
1 file changed, 22 insertions(+), 37 deletions(-)
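
Note for reviewers: below is a minimal, illustrative sketch (not the
driver code in the diff; grow_buf() and its parameters are
hypothetical) of the kvzalloc()-based grow-and-swap pattern this patch
adopts. kvzalloc() attempts a kmalloc() first and falls back to
vmalloc() when physically contiguous memory is scarce, so the
allocation is very unlikely to fail and no dedicated retry timer is
needed:

	#include <linux/slab.h>	/* kvzalloc(), kvfree() */
	#include <linux/types.h>

	static void grow_buf(u32 **buf, int *buf_len, int new_len)
	{
		/* Allocate the larger buffer first; on failure, keep
		 * the old one. The caller simply tries again on its
		 * next periodic run, so no retry bookkeeping is
		 * needed.
		 */
		u32 *tmp = kvzalloc(new_len * sizeof(u32), GFP_KERNEL);

		if (!tmp)
			return;
		/* kvfree() handles both kmalloc()- and vmalloc()-backed
		 * memory, so the caller does not need to know which
		 * fallback was taken.
		 */
		kvfree(*buf);
		*buf = tmp;
		*buf_len = new_len;
	}
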
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 7d6174d0f260..9892895da9ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -38,7 +38,6 @@
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
-#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
#define MLX5_INIT_COUNTERS_BULK 8
@@ -263,39 +262,28 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
mlx5_fc_free(dev, counter);
}
-static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev)
+static void mlx5_fc_stats_bulk_query_buf_realloc(struct mlx5_core_dev *dev,
+ int bulk_query_len)
{
struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
- int max_bulk_len = get_max_bulk_query_len(dev);
- unsigned long now = jiffies;
u32 *bulk_query_out_tmp;
- int max_out_len;
-
- if (fc_stats->bulk_query_alloc_failed &&
- time_before(now, fc_stats->next_bulk_query_alloc))
- return;
+ int out_len;
- max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
- bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL);
+ out_len = mlx5_cmd_fc_get_bulk_query_out_len(bulk_query_len);
+ bulk_query_out_tmp = kvzalloc(out_len, GFP_KERNEL);
if (!bulk_query_out_tmp) {
mlx5_core_warn_once(dev,
- "Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n",
- max_bulk_len);
- fc_stats->bulk_query_alloc_failed = true;
- fc_stats->next_bulk_query_alloc =
- now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD;
+ "Can't increase flow counters bulk query buffer size, alloc failed, bulk_query_len(%d)\n",
+ bulk_query_len);
return;
}
- kfree(fc_stats->bulk_query_out);
+ kvfree(fc_stats->bulk_query_out);
fc_stats->bulk_query_out = bulk_query_out_tmp;
- fc_stats->bulk_query_len = max_bulk_len;
- if (fc_stats->bulk_query_alloc_failed) {
- mlx5_core_info(dev,
- "Flow counters bulk query buffer size increased, bulk_size(%d)\n",
- max_bulk_len);
- fc_stats->bulk_query_alloc_failed = false;
- }
+ fc_stats->bulk_query_len = bulk_query_len;
+ mlx5_core_info(dev,
+ "Flow counters bulk query buffer size increased, bulk_query_len(%d)\n",
+ bulk_query_len);
}
static void mlx5_fc_stats_work(struct work_struct *work)
@@ -327,13 +315,14 @@ static void mlx5_fc_stats_work(struct work_struct *work)
fc_stats->num_counters--;
}
- if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
- fc_stats->num_counters > get_init_bulk_query_len(dev))
- mlx5_fc_stats_bulk_query_size_increase(dev);
-
if (time_before(now, fc_stats->next_query) ||
list_empty(&fc_stats->counters))
return;
+
+ if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
+ fc_stats->num_counters > get_init_bulk_query_len(dev))
+ mlx5_fc_stats_bulk_query_buf_realloc(dev, get_max_bulk_query_len(dev));
+
last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);
counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
@@ -453,12 +442,11 @@ EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats;
- int init_bulk_len;
- int init_out_len;
fc_stats = kzalloc(sizeof(*fc_stats), GFP_KERNEL);
if (!fc_stats)
return -ENOMEM;
+ dev->priv.fc_stats = fc_stats;
spin_lock_init(&fc_stats->counters_idr_lock);
idr_init(&fc_stats->counters_idr);
@@ -466,12 +454,10 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
init_llist_head(&fc_stats->addlist);
init_llist_head(&fc_stats->dellist);
- init_bulk_len = get_init_bulk_query_len(dev);
- init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len);
- fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL);
+ /* Allocate initial (small) bulk query buffer. */
+ mlx5_fc_stats_bulk_query_buf_realloc(dev, get_init_bulk_query_len(dev));
if (!fc_stats->bulk_query_out)
goto err_bulk;
- fc_stats->bulk_query_len = init_bulk_len;
fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
if (!fc_stats->wq)
@@ -481,12 +467,11 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
- dev->priv.fc_stats = fc_stats;
return 0;
err_wq_create:
- kfree(fc_stats->bulk_query_out);
+ kvfree(fc_stats->bulk_query_out);
err_bulk:
kfree(fc_stats);
return -ENOMEM;
@@ -512,7 +497,7 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
idr_destroy(&fc_stats->counters_idr);
- kfree(fc_stats->bulk_query_out);
+ kvfree(fc_stats->bulk_query_out);
kfree(fc_stats);
}
--
2.44.0