Message-Id: <e4a3bb0fb407ead607b85f7f041f24b586c8b99d.1649190493.git.lorenzo@kernel.org>
Date: Tue, 5 Apr 2022 22:32:12 +0200
From: Lorenzo Bianconi <lorenzo@...nel.org>
To: netdev@...r.kernel.org
Cc: lorenzo.bianconi@...hat.com, davem@...emloft.net, kuba@...nel.org,
pabeni@...hat.com, thomas.petazzoni@...tlin.com,
linux@...linux.org.uk, jbrouer@...hat.com,
ilias.apalodimas@...aro.org, jdamato@...tly.com
Subject: [PATCH net-next] net: mvneta: add support for page_pool_get_stats

Introduce support for the page_pool_get_stats API in the mvneta driver.
If CONFIG_PAGE_POOL_STATS is enabled, the page pool statistics are
reported through ethtool.

Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
---
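A note for reviewers (not part of the commit message): the enum entries,
the mvneta_statistics[] rows and the switch cases below are all the
driver needs, because the existing ethtool callbacks walk
mvneta_statistics[] for both the stat names and the per-port
ethtool_stats[] values. Once CONFIG_PAGE_POOL_STATS is enabled the new
counters therefore show up directly in "ethtool -S <iface>" (e.g.
filtered with "| grep rx_pp_"). Roughly, as a paraphrased sketch of the
current mvneta plumbing rather than a verbatim quote of the driver:

/* Paraphrased sketch: the string table and pp->ethtool_stats[] use the
 * same index, so every row added to mvneta_statistics[] (including the
 * new page_pool entries) is exported by name with no further changes.
 */
static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
	}
}

static void mvneta_ethtool_get_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int i;

	/* Refreshes pp->ethtool_stats[], including the page_pool
	 * counters aggregated by mvneta_ethtool_update_pp_stats().
	 */
	mvneta_ethtool_update_stats(pp);

	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
		*data++ = pp->ethtool_stats[i];
}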
drivers/net/ethernet/marvell/mvneta.c | 105 ++++++++++++++++++++++++++
1 file changed, 105 insertions(+)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 934f6dd90992..b986a6bded9a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -382,6 +382,19 @@ enum {
ETHTOOL_XDP_TX_ERR,
ETHTOOL_XDP_XMIT,
ETHTOOL_XDP_XMIT_ERR,
+#ifdef CONFIG_PAGE_POOL_STATS
+ ETHTOOL_PP_ALLOC_FAST,
+ ETHTOOL_PP_ALLOC_SLOW,
+ ETHTOOL_PP_ALLOC_SLOW_HIGH_ORDER,
+ ETHTOOL_PP_ALLOC_EMPTY,
+ ETHTOOL_PP_ALLOC_REFILL,
+ ETHTOOL_PP_ALLOC_WAIVE,
+ ETHTOOL_PP_RECYCLE_CACHED,
+ ETHTOOL_PP_RECYCLE_CACHE_FULL,
+ ETHTOOL_PP_RECYCLE_RING,
+ ETHTOOL_PP_RECYCLE_RING_FULL,
+ ETHTOOL_PP_RECYCLE_RELEASED_REF,
+#endif /* CONFIG_PAGE_POOL_STATS */
ETHTOOL_MAX_STATS,
};
@@ -443,6 +456,19 @@ static const struct mvneta_statistic mvneta_statistics[] = {
{ ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
{ ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
+#ifdef CONFIG_PAGE_POOL_STATS
+ { ETHTOOL_PP_ALLOC_FAST, T_SW, "rx_pp_alloc_fast", },
+ { ETHTOOL_PP_ALLOC_SLOW, T_SW, "rx_pp_alloc_slow", },
+ { ETHTOOL_PP_ALLOC_SLOW_HIGH_ORDER, T_SW, "rx_pp_alloc_slow_ho", },
+ { ETHTOOL_PP_ALLOC_EMPTY, T_SW, "rx_pp_alloc_empty", },
+ { ETHTOOL_PP_ALLOC_REFILL, T_SW, "rx_pp_alloc_refill", },
+ { ETHTOOL_PP_ALLOC_WAIVE, T_SW, "rx_pp_alloc_waive", },
+ { ETHTOOL_PP_RECYCLE_CACHED, T_SW, "rx_pp_recycle_cached", },
+ { ETHTOOL_PP_RECYCLE_CACHE_FULL, T_SW, "rx_pp_recycle_cache_full", },
+ { ETHTOOL_PP_RECYCLE_RING, T_SW, "rx_pp_recycle_ring", },
+ { ETHTOOL_PP_RECYCLE_RING_FULL, T_SW, "rx_pp_recycle_ring_full", },
+ { ETHTOOL_PP_RECYCLE_RELEASED_REF, T_SW, "rx_pp_recycle_released_ref", },
+#endif /* CONFIG_PAGE_POOL_STATS */
};
struct mvneta_stats {
@@ -4783,16 +4809,56 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
}
}
+#ifdef CONFIG_PAGE_POOL_STATS
+static void mvneta_ethtool_update_pp_stats(struct mvneta_port *pp,
+ struct page_pool_stats *stats)
+{
+ int i;
+
+ memset(stats, 0, sizeof(*stats));
+ for (i = 0; i < rxq_number; i++) {
+ struct page_pool *page_pool = pp->rxqs[i].page_pool;
+ struct page_pool_stats pp_stats = {};
+
+ if (!page_pool_get_stats(page_pool, &pp_stats))
+ continue;
+
+ stats->alloc_stats.fast += pp_stats.alloc_stats.fast;
+ stats->alloc_stats.slow += pp_stats.alloc_stats.slow;
+ stats->alloc_stats.slow_high_order +=
+ pp_stats.alloc_stats.slow_high_order;
+ stats->alloc_stats.empty += pp_stats.alloc_stats.empty;
+ stats->alloc_stats.refill += pp_stats.alloc_stats.refill;
+ stats->alloc_stats.waive += pp_stats.alloc_stats.waive;
+ stats->recycle_stats.cached += pp_stats.recycle_stats.cached;
+ stats->recycle_stats.cache_full +=
+ pp_stats.recycle_stats.cache_full;
+ stats->recycle_stats.ring += pp_stats.recycle_stats.ring;
+ stats->recycle_stats.ring_full +=
+ pp_stats.recycle_stats.ring_full;
+ stats->recycle_stats.released_refcnt +=
+ pp_stats.recycle_stats.released_refcnt;
+ }
+}
+#endif /* CONFIG_PAGE_POOL_STATS */
+
static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
struct mvneta_ethtool_stats stats = {};
const struct mvneta_statistic *s;
+#ifdef CONFIG_PAGE_POOL_STATS
+ struct page_pool_stats pp_stats;
+#endif /* CONFIG_PAGE_POOL_STATS */
void __iomem *base = pp->base;
u32 high, low;
u64 val;
int i;
mvneta_ethtool_update_pcpu_stats(pp, &stats);
+#ifdef CONFIG_PAGE_POOL_STATS
+ mvneta_ethtool_update_pp_stats(pp, &pp_stats);
+#endif /* CONFIG_PAGE_POOL_STATS */
+
for (i = 0, s = mvneta_statistics;
s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
s++, i++) {
@@ -4841,6 +4907,45 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
case ETHTOOL_XDP_XMIT_ERR:
pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
break;
+#ifdef CONFIG_PAGE_POOL_STATS
+ case ETHTOOL_PP_ALLOC_FAST:
+ pp->ethtool_stats[i] = pp_stats.alloc_stats.fast;
+ break;
+ case ETHTOOL_PP_ALLOC_SLOW:
+ pp->ethtool_stats[i] = pp_stats.alloc_stats.slow;
+ break;
+ case ETHTOOL_PP_ALLOC_SLOW_HIGH_ORDER:
+ pp->ethtool_stats[i] =
+ pp_stats.alloc_stats.slow_high_order;
+ break;
+ case ETHTOOL_PP_ALLOC_EMPTY:
+ pp->ethtool_stats[i] = pp_stats.alloc_stats.empty;
+ break;
+ case ETHTOOL_PP_ALLOC_REFILL:
+ pp->ethtool_stats[i] = pp_stats.alloc_stats.refill;
+ break;
+ case ETHTOOL_PP_ALLOC_WAIVE:
+ pp->ethtool_stats[i] = pp_stats.alloc_stats.waive;
+ break;
+ case ETHTOOL_PP_RECYCLE_CACHED:
+ pp->ethtool_stats[i] = pp_stats.recycle_stats.cached;
+ break;
+ case ETHTOOL_PP_RECYCLE_CACHE_FULL:
+ pp->ethtool_stats[i] =
+ pp_stats.recycle_stats.cache_full;
+ break;
+ case ETHTOOL_PP_RECYCLE_RING:
+ pp->ethtool_stats[i] = pp_stats.recycle_stats.ring;
+ break;
+ case ETHTOOL_PP_RECYCLE_RING_FULL:
+ pp->ethtool_stats[i] =
+ pp_stats.recycle_stats.ring_full;
+ break;
+ case ETHTOOL_PP_RECYCLE_RELEASED_REF:
+ pp->ethtool_stats[i] =
+ pp_stats.recycle_stats.released_refcnt;
+ break;
+#endif /* CONFIG_PAGE_POOL_STATS */
}
break;
}
--
2.35.1