Message-ID: <1747950086-1246773-8-git-send-email-tariqt@nvidia.com>
Date: Fri, 23 May 2025 00:41:22 +0300
From: Tariq Toukan <tariqt@...dia.com>
To: "David S. Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, Eric Dumazet <edumazet@...gle.com>, "Andrew
Lunn" <andrew+netdev@...n.ch>
CC: Saeed Mahameed <saeedm@...dia.com>, Leon Romanovsky <leon@...nel.org>,
Tariq Toukan <tariqt@...dia.com>, Richard Cochran <richardcochran@...il.com>,
Alexei Starovoitov <ast@...nel.org>, Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>, John Fastabend
<john.fastabend@...il.com>, <netdev@...r.kernel.org>,
<linux-rdma@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<bpf@...r.kernel.org>, Moshe Shemesh <moshe@...dia.com>, Mark Bloch
<mbloch@...dia.com>, Gal Pressman <gal@...dia.com>, Cosmin Ratiu
<cratiu@...dia.com>, Dragos Tatulea <dtatulea@...dia.com>
Subject: [PATCH net-next V2 07/11] net/mlx5e: SHAMPO: Headers page pool stats
From: Saeed Mahameed <saeedm@...dia.com>
Expose the stats of the new headers page pool.

The new counters mirror the existing page pool counters and show up in
ethtool -S: aggregated as rx_pp_hd_* in the software stats, with per-ring
pp_hd_* counterparts, covering both the alloc and the recycle paths.
Signed-off-by: Saeed Mahameed <saeedm@...dia.com>
Reviewed-by: Dragos Tatulea <dtatulea@...dia.com>
Signed-off-by: Cosmin Ratiu <cratiu@...dia.com>
Signed-off-by: Tariq Toukan <tariqt@...dia.com>
---
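A note on the page_pool_get_stats() calling pattern used here: the helper
accumulates into the caller-supplied struct page_pool_stats rather than
overwriting it (initializing the struct is the caller's responsibility),
which is why the struct is zeroed before being reused for the headers
pool. A minimal standalone sketch of that pattern, under
CONFIG_PAGE_POOL_STATS; the helper name and output parameter below are
hypothetical, only the page pool API itself is real:

  #include <linux/string.h>
  #include <net/page_pool/helpers.h>

  /* Hypothetical example: read one pool's fast-path alloc counter.
   * page_pool_get_stats() returns false if it has nothing to report.
   */
  static bool example_read_fast_allocs(struct page_pool *pool, u64 *fast)
  {
  	struct page_pool_stats stats;

  	/* Must be zeroed: the helper adds to the counters, it does not set them. */
  	memset(&stats, 0, sizeof(stats));
  	if (!page_pool_get_stats(pool, &stats))
  		return false;

  	*fast = stats.alloc_stats.fast;
  	return true;
  }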
.../ethernet/mellanox/mlx5/core/en_stats.c | 55 +++++++++++++++++++
.../ethernet/mellanox/mlx5/core/en_stats.h | 24 +++++++++
2 files changed, 79 insertions(+)
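For reference, the counters copied in this patch map 1:1 onto the upstream
page pool stats layout (reproduced from include/net/page_pool/types.h as
of this series; the comments summarize Documentation/networking/page_pool.rst):

  struct page_pool_alloc_stats {
  	u64 fast;		/* allocations served from the pool cache */
  	u64 slow;		/* slow-path order-0 allocations */
  	u64 slow_high_order;	/* slow-path high-order allocations */
  	u64 empty;		/* ptr ring empty, slow-path allocation forced */
  	u64 refill;		/* allocations that triggered a cache refill */
  	u64 waive;		/* ptr-ring pages not cached, e.g. NUMA mismatch */
  };

  struct page_pool_recycle_stats {
  	u64 cached;		/* pages recycled into the pool cache */
  	u64 cache_full;		/* recycle attempts that found the cache full */
  	u64 ring;		/* pages recycled into the ptr ring */
  	u64 ring_full;		/* pages released because the ptr ring was full */
  	u64 released_refcnt;	/* pages released due to an elevated refcount */
  };

  struct page_pool_stats {
  	struct page_pool_alloc_stats alloc_stats;
  	struct page_pool_recycle_stats recycle_stats;
  };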
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 19664fa7f217..dcfe86d6dc83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -205,6 +205,18 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
+
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_fast) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_slow) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_slow_high_order) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_empty) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_refill) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_cached) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_cache_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_ring) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_ring_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_released_ref) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
@@ -384,6 +396,18 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
+
+ s->rx_pp_hd_alloc_fast += rq_stats->pp_hd_alloc_fast;
+ s->rx_pp_hd_alloc_slow += rq_stats->pp_hd_alloc_slow;
+ s->rx_pp_hd_alloc_empty += rq_stats->pp_hd_alloc_empty;
+ s->rx_pp_hd_alloc_refill += rq_stats->pp_hd_alloc_refill;
+ s->rx_pp_hd_alloc_waive += rq_stats->pp_hd_alloc_waive;
+ s->rx_pp_hd_alloc_slow_high_order += rq_stats->pp_hd_alloc_slow_high_order;
+ s->rx_pp_hd_recycle_cached += rq_stats->pp_hd_recycle_cached;
+ s->rx_pp_hd_recycle_cache_full += rq_stats->pp_hd_recycle_cache_full;
+ s->rx_pp_hd_recycle_ring += rq_stats->pp_hd_recycle_ring;
+ s->rx_pp_hd_recycle_ring_full += rq_stats->pp_hd_recycle_ring_full;
+ s->rx_pp_hd_recycle_released_ref += rq_stats->pp_hd_recycle_released_ref;
#ifdef CONFIG_MLX5_EN_TLS
s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
@@ -511,6 +535,25 @@ static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
+
+	/* page_pool_get_stats() accumulates into @stats, clear it before reuse */
+	memset(&stats, 0, sizeof(stats));
+	pool = c->rq.hd_page_pool;
+	if (!pool || !page_pool_get_stats(pool, &stats))
+		return;
+
+ rq_stats->pp_hd_alloc_fast = stats.alloc_stats.fast;
+ rq_stats->pp_hd_alloc_slow = stats.alloc_stats.slow;
+ rq_stats->pp_hd_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
+ rq_stats->pp_hd_alloc_empty = stats.alloc_stats.empty;
+ rq_stats->pp_hd_alloc_waive = stats.alloc_stats.waive;
+ rq_stats->pp_hd_alloc_refill = stats.alloc_stats.refill;
+
+ rq_stats->pp_hd_recycle_cached = stats.recycle_stats.cached;
+ rq_stats->pp_hd_recycle_cache_full = stats.recycle_stats.cache_full;
+ rq_stats->pp_hd_recycle_ring = stats.recycle_stats.ring;
+ rq_stats->pp_hd_recycle_ring_full = stats.recycle_stats.ring_full;
+ rq_stats->pp_hd_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
@@ -2130,6 +2173,18 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
+
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_fast) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_slow) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_slow_high_order) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_empty) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_refill) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_waive) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_cached) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_cache_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_ring) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_ring_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_released_ref) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index def5dea1463d..113221dfcdfa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -226,6 +226,18 @@ struct mlx5e_sw_stats {
u64 rx_pp_recycle_ring;
u64 rx_pp_recycle_ring_full;
u64 rx_pp_recycle_released_ref;
+
+ u64 rx_pp_hd_alloc_fast;
+ u64 rx_pp_hd_alloc_slow;
+ u64 rx_pp_hd_alloc_slow_high_order;
+ u64 rx_pp_hd_alloc_empty;
+ u64 rx_pp_hd_alloc_refill;
+ u64 rx_pp_hd_alloc_waive;
+ u64 rx_pp_hd_recycle_cached;
+ u64 rx_pp_hd_recycle_cache_full;
+ u64 rx_pp_hd_recycle_ring;
+ u64 rx_pp_hd_recycle_ring_full;
+ u64 rx_pp_hd_recycle_released_ref;
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
@@ -394,6 +406,18 @@ struct mlx5e_rq_stats {
u64 pp_recycle_ring;
u64 pp_recycle_ring_full;
u64 pp_recycle_released_ref;
+
+ u64 pp_hd_alloc_fast;
+ u64 pp_hd_alloc_slow;
+ u64 pp_hd_alloc_slow_high_order;
+ u64 pp_hd_alloc_empty;
+ u64 pp_hd_alloc_refill;
+ u64 pp_hd_alloc_waive;
+ u64 pp_hd_recycle_cached;
+ u64 pp_hd_recycle_cache_full;
+ u64 pp_hd_recycle_ring;
+ u64 pp_hd_recycle_ring_full;
+ u64 pp_hd_recycle_released_ref;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_decrypted_packets;
u64 tls_decrypted_bytes;
--
2.31.1