Date:   Wed, 26 Jan 2022 14:48:15 -0800
From:   Joe Damato <jdamato@...tly.com>
To:     netdev@...r.kernel.org
Cc:     kuba@...nel.org, davem@...emloft.net, ilias.apalodimas@...aro.org,
        hawk@...nel.org, Joe Damato <jdamato@...tly.com>
Subject: [PATCH 1/6] net: page_pool: Add alloc stats and fast path stat

Add a stats structure with an internal alloc structure for holding
allocation-path related stats.

The alloc structure contains the stat 'fast', which tracks fast-path
allocations.

A static inline accessor function is exposed for reading this stat.
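
For example (a sketch, not part of this patch; the driver structure
and field names are hypothetical), a driver that owns a pool could
fold the counter into its own software stats:

	/* Hypothetical driver helper, for illustration only. */
	static void my_drv_update_rx_stats(struct my_drv_rxq *rxq)
	{
		/* Valid only while rxq->page_pool has not been destroyed. */
		rxq->sw_stats.pp_fast_allocs =
			page_pool_stats_get_fast(rxq->page_pool);
	}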

Signed-off-by: Joe Damato <jdamato@...tly.com>
---
 include/net/page_pool.h | 26 ++++++++++++++++++++++++++
 net/core/page_pool.c    |  1 +
 2 files changed, 27 insertions(+)

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 79a8055..3ae3dc4 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -71,6 +71,20 @@ struct pp_alloc_cache {
 	struct page *cache[PP_ALLOC_CACHE_SIZE];
 };
 
+/**
+ * Stats for tracking page_pool events.
+ *
+ * Accessor functions for these stats are provided below.
+ *
+ * Note that it is the responsibility of the API consumer to ensure that
+ * the page_pool has not been destroyed while accessing stats fields.
+ */
+struct page_pool_stats {
+	struct {
+		u64 fast; /* fast path allocations */
+	} alloc;
+};
+
 struct page_pool_params {
 	unsigned int	flags;
 	unsigned int	order;
@@ -86,6 +100,7 @@ struct page_pool_params {
 
 struct page_pool {
 	struct page_pool_params p;
+	struct page_pool_stats ps;
 
 	struct delayed_work release_dw;
 	void (*disconnect)(void *);
@@ -180,6 +195,12 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
 void page_pool_release_page(struct page_pool *pool, struct page *page);
 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 			     int count);
+
+static inline u64 page_pool_stats_get_fast(struct page_pool *pool)
+{
+	return pool->ps.alloc.fast;
+}
+
 #else
 static inline void page_pool_destroy(struct page_pool *pool)
 {
@@ -199,6 +220,11 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 					   int count)
 {
 }
+
+static inline u64 page_pool_stats_get_fast(struct page_pool *pool)
+{
+	return 0;
+}
 #endif
 
 void page_pool_put_page(struct page_pool *pool, struct page *page,
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index bd62c01..84c9566 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -166,6 +166,7 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
 	if (likely(pool->alloc.count)) {
 		/* Fast-path */
 		page = pool->alloc.cache[--pool->alloc.count];
+		pool->ps.alloc.fast++;
 	} else {
 		page = page_pool_refill_alloc_cache(pool);
 	}
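
The increment above is a plain (non-atomic) u64 update; this is safe
because the alloc fast path is expected to run in the pool's
NAPI/softirq context, where there is a single writer. As the header
comment notes, the stat must not be read after the pool is destroyed.
A minimal teardown sketch that keeps that ordering (driver names are
hypothetical):

	/* Hypothetical teardown path, for illustration only. */
	static void my_drv_close_rxq(struct my_drv_rxq *rxq)
	{
		/* Read the stat while the pool is still alive ... */
		u64 fast = page_pool_stats_get_fast(rxq->page_pool);

		pr_debug("rxq fast-path allocs: %llu\n",
			 (unsigned long long)fast);

		/* ... then release the pool; the stats memory goes
		 * away with it.
		 */
		page_pool_destroy(rxq->page_pool);
		rxq->page_pool = NULL;
	}
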
-- 
2.7.4
