Message-ID: <20250107060708.1610882-8-tariqt@nvidia.com>
Date: Tue, 7 Jan 2025 08:07:02 +0200
From: Tariq Toukan <tariqt@...dia.com>
To: "David S. Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, Eric Dumazet <edumazet@...gle.com>, "Andrew
Lunn" <andrew+netdev@...n.ch>
CC: <netdev@...r.kernel.org>, Saeed Mahameed <saeedm@...dia.com>, Gal Pressman
<gal@...dia.com>, Leon Romanovsky <leonro@...dia.com>, Mark Bloch
<mbloch@...dia.com>, Moshe Shemesh <moshe@...dia.com>, Yevgeny Kliteynik
<kliteyn@...dia.com>, Tariq Toukan <tariqt@...dia.com>
Subject: [PATCH net-next 07/13] net/mlx5: fs, manage flow counters HWS action sharing by refcount
From: Moshe Shemesh <moshe@...dia.com>
Multiple flow counters from the same bulk can share a single Hardware
Steering (HWS) action when used in Hardware Steering rules. Since
counter bulks are not created exclusively for Hardware Steering, but
also serve statistics gathering and other steering modes, create the
HWS action lazily, only when it is first needed by a Hardware Steering
rule. The per-bulk action is then managed through a reference count,
which gives better resource management than automatically creating an
HWS action for every bulk of flow counters.
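Purely as an illustration of the intended get/put discipline (this
sketch is not part of the patch; the rule structure and helper names
are invented, and only mlx5_fc_get_hws_action() and
mlx5_fc_put_hws_action() come from the code below), a Hardware
Steering rule that uses a flow counter would be expected to pair the
calls roughly like this:

struct example_hws_rule {
        struct mlx5_fc *counter;
        struct mlx5hws_action *counter_action;
};

static int example_rule_attach_counter(struct mlx5hws_context *ctx,
                                       struct example_hws_rule *rule,
                                       struct mlx5_fc *counter)
{
        /* First user of the bulk creates the HWS action; later users
         * only take a reference on the already created action.
         */
        rule->counter_action = mlx5_fc_get_hws_action(ctx, counter);
        if (!rule->counter_action)
                return -ENOMEM;
        rule->counter = counter;
        return 0;
}

static void example_rule_detach_counter(struct example_hws_rule *rule)
{
        /* Last user of the bulk destroys the shared HWS action. */
        mlx5_fc_put_hws_action(rule->counter);
        rule->counter = NULL;
        rule->counter_action = NULL;
}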
Signed-off-by: Moshe Shemesh <moshe@...dia.com>
Reviewed-by: Yevgeny Kliteynik <kliteyn@...dia.com>
Reviewed-by: Mark Bloch <mbloch@...dia.com>
Signed-off-by: Tariq Toukan <tariqt@...dia.com>
---
.../net/ethernet/mellanox/mlx5/core/fs_core.h | 36 ++++++++++++++
.../ethernet/mellanox/mlx5/core/fs_counters.c | 37 ++++-----------
.../mlx5/core/steering/hws/fs_hws_pools.c | 47 +++++++++++++++++++
.../mlx5/core/steering/hws/fs_hws_pools.h | 3 ++
4 files changed, 94 insertions(+), 29 deletions(-)
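Note for reviewers, illustration only: the new get/put helpers follow
the familiar refcount-with-double-checked-locking pattern. The
standalone userspace analogue below (all names invented; C11 atomics
and a pthread mutex stand in for the kernel's refcount_t and mutex;
build with "cc -pthread") shows the same idea in a form that can be
compiled and run on its own:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct shared_action {
        void *action;                   /* lazily created resource */
        pthread_mutex_t lock;           /* serializes create/destroy */
        atomic_int refcount;            /* 0 means "no action yet" */
};

/* Stand-ins for mlx5hws_action_create_counter()/_destroy(). */
static void *fake_action_create(void) { return malloc(1); }
static void fake_action_destroy(void *a) { free(a); }

/* Take a reference if the action already exists, otherwise create it
 * under the lock.
 */
static void *shared_action_get(struct shared_action *s)
{
        int ref = atomic_load(&s->refcount);

        /* Fast path: the equivalent of refcount_inc_not_zero(). */
        while (ref > 0)
                if (atomic_compare_exchange_weak(&s->refcount, &ref, ref + 1))
                        return s->action;

        pthread_mutex_lock(&s->lock);
        if (atomic_load(&s->refcount) > 0) {
                /* Someone else created it while we waited for the lock. */
                atomic_fetch_add(&s->refcount, 1);
                pthread_mutex_unlock(&s->lock);
                return s->action;
        }
        s->action = fake_action_create();
        if (s->action)
                atomic_store(&s->refcount, 1);
        pthread_mutex_unlock(&s->lock);
        return s->action;
}

/* The last reference destroys the action.  All decrements happen under
 * the lock for simplicity; the kernel version adds a
 * refcount_dec_not_one() fast path.
 */
static void shared_action_put(struct shared_action *s)
{
        pthread_mutex_lock(&s->lock);
        if (atomic_fetch_sub(&s->refcount, 1) == 1) {
                fake_action_destroy(s->action);
                s->action = NULL;
        }
        pthread_mutex_unlock(&s->lock);
}

int main(void)
{
        struct shared_action s = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .refcount = 0,
        };
        void *a = shared_action_get(&s);        /* creates the action */
        void *b = shared_action_get(&s);        /* reuses it, a == b */

        printf("shared: %s\n", a == b ? "yes" : "no");
        shared_action_put(&s);
        shared_action_put(&s);                  /* last put destroys it */
        return 0;
}

The kernel version in fs_hws_pools.c below is structurally the same:
refcount_inc_not_zero() as the lock-free fast path, a re-check under
the mutex before calling mlx5hws_action_create_counter(), and a put
side that destroys the action only when the last reference goes away.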
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 06ec48f51b6d..b6543a53d7c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -316,6 +316,42 @@ struct mlx5_flow_root_namespace {
const struct mlx5_flow_cmds *cmds;
};
+enum mlx5_fc_type {
+ MLX5_FC_TYPE_ACQUIRED = 0,
+ MLX5_FC_TYPE_LOCAL,
+};
+
+struct mlx5_fc_cache {
+ u64 packets;
+ u64 bytes;
+ u64 lastuse;
+};
+
+struct mlx5_fc {
+ u32 id;
+ bool aging;
+ enum mlx5_fc_type type;
+ struct mlx5_fc_bulk *bulk;
+ struct mlx5_fc_cache cache;
+ /* last{packets,bytes} are used for calculating deltas since last reading. */
+ u64 lastpackets;
+ u64 lastbytes;
+};
+
+struct mlx5_fc_bulk_hws_data {
+ struct mlx5hws_action *hws_action;
+ struct mutex lock; /* protects hws_action */
+ refcount_t hws_action_refcount;
+};
+
+struct mlx5_fc_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ u32 base_id;
+ struct mlx5_fc_bulk_hws_data hws_data;
+ struct mlx5_fc fcs[];
+};
+
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 94d9caacd50f..492775d3d193 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -44,28 +44,6 @@
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
-enum mlx5_fc_type {
- MLX5_FC_TYPE_ACQUIRED = 0,
- MLX5_FC_TYPE_LOCAL,
-};
-
-struct mlx5_fc_cache {
- u64 packets;
- u64 bytes;
- u64 lastuse;
-};
-
-struct mlx5_fc {
- u32 id;
- bool aging;
- enum mlx5_fc_type type;
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc_cache cache;
- /* last{packets,bytes} are used for calculating deltas since last reading. */
- u64 lastpackets;
- u64 lastbytes;
-};
-
struct mlx5_fc_stats {
struct xarray counters;
@@ -434,13 +412,7 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
fc_stats->sampling_interval);
}
-/* Flow counter bluks */
-
-struct mlx5_fc_bulk {
- struct mlx5_fs_bulk fs_bulk;
- u32 base_id;
- struct mlx5_fc fcs[];
-};
+/* Flow counter bulks */
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
u32 id)
@@ -449,6 +421,11 @@ static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
counter->id = id;
}
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter)
+{
+ return counter->bulk->base_id;
+}
+
static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev,
void *pool_ctx)
{
@@ -474,6 +451,8 @@ static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev,
for (i = 0; i < bulk_len; i++)
mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i);
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
return &fc_bulk->fs_bulk;
fs_bulk_cleanup:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
index 60dc0aaccbba..692fd2d2c0ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -400,3 +400,50 @@ bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
return false;
return true;
}
+
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+ struct mlx5_fc *counter)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fc_bulk *fc_bulk = counter->bulk;
+ struct mlx5_fc_bulk_hws_data *fc_bulk_hws;
+
+ fc_bulk_hws = &fc_bulk->hws_data;
+ /* try to avoid locking if not necessary */
+ if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount))
+ return fc_bulk_hws->hws_action;
+
+ mutex_lock(&fc_bulk_hws->lock);
+ if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount)) {
+ mutex_unlock(&fc_bulk_hws->lock);
+ return fc_bulk_hws->hws_action;
+ }
+ fc_bulk_hws->hws_action =
+ mlx5hws_action_create_counter(ctx, fc_bulk->base_id, flags);
+ if (!fc_bulk_hws->hws_action) {
+ mutex_unlock(&fc_bulk_hws->lock);
+ return NULL;
+ }
+ refcount_set(&fc_bulk_hws->hws_action_refcount, 1);
+ mutex_unlock(&fc_bulk_hws->lock);
+
+ return fc_bulk_hws->hws_action;
+}
+
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
+{
+ struct mlx5_fc_bulk_hws_data *fc_bulk_hws = &counter->bulk->hws_data;
+
+ /* try to avoid locking if not necessary */
+ if (refcount_dec_not_one(&fc_bulk_hws->hws_action_refcount))
+ return;
+
+ mutex_lock(&fc_bulk_hws->lock);
+ if (!refcount_dec_and_test(&fc_bulk_hws->hws_action_refcount)) {
+ mutex_unlock(&fc_bulk_hws->lock);
+ return;
+ }
+ mlx5hws_action_destroy(fc_bulk_hws->hws_action);
+ fc_bulk_hws->hws_action = NULL;
+ mutex_unlock(&fc_bulk_hws->lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
index eda17031aef0..cde8176c981a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
@@ -67,4 +67,7 @@ void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
struct mlx5_fs_hws_mh *mh_data);
bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
struct mlx5hws_action_mh_pattern *pattern);
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+ struct mlx5_fc *counter);
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter);
#endif /* __MLX5_FS_HWS_POOLS_H__ */
--
2.45.0