lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date: Wed, 16 Aug 2023 16:43:00 -0700
From: Jakub Kicinski <kuba@...nel.org>
To: netdev@...r.kernel.org
Cc: hawk@...nel.org,
	ilias.apalodimas@...aro.org,
	aleksander.lobakin@...el.com,
	linyunsheng@...wei.com,
	almasrymina@...gle.com,
	Jakub Kicinski <kuba@...nel.org>
Subject: [RFC net-next 11/13] net: page_pool: report when page pool was destroyed

Report when a page pool was destroyed, along with the number of
inflight references. This allows users to proactively check for dead
page pools without waiting to see whether error messages get
printed in dmesg.

Only provide inflight for pools which were already "destroyed".
Inflight information could also be interesting for "live"
pools, but we don't want to have to deal with the potentially
negative values that races would produce there.

Example output for a fake leaked page pool using some hacks
in netdevsim (one "live" pool, and one "leaked" on the same dev):

$ ./cli.py --no-schema --spec netlink/specs/netdev.yaml \
           --dump page-pool-get
[{'id': 2, 'ifindex': 3},
 {'id': 1, 'ifindex': 3, 'destroyed': 133, 'inflight': 1}]

Signed-off-by: Jakub Kicinski <kuba@...nel.org>
---
 Documentation/netlink/specs/netdev.yaml | 16 ++++++++++++++++
 include/net/page_pool/types.h           |  1 +
 include/uapi/linux/netdev.h             |  2 ++
 net/core/page_pool.c                    |  3 ++-
 net/core/page_pool_priv.h               |  3 +++
 net/core/page_pool_user.c               | 16 ++++++++++++++++
 6 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index 47ae2a45daea..1fd3b4235251 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -92,6 +92,20 @@ name: netdev
         name: napi-id
         doc: Id of NAPI using this Page Pool instance.
         type: u32
+      -
+        name: destroyed
+        type: u64
+        doc: |
+          Seconds in CLOCK_BOOTTIME of when Page Pool was destroyed.
+          Page Pools wait for all the memory allocated from them to be freed
+          before truly disappearing.
+          Absent if Page Pool hasn't been destroyed.
+      -
+        name: inflight
+        type: u32
+        doc: |
+          Number of outstanding references to this page pool (allocated
+          but yet to be freed pages).
 
 operations:
   list:
@@ -140,6 +154,8 @@ name: netdev
             - id
             - ifindex
             - napi-id
+            - destroyed
+            - inflight
       dump:
         reply: *pp-reply
     -
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 3017557e0c59..490c7419a474 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -195,6 +195,7 @@ struct page_pool {
 	/* User-facing fields, protected by page_pools_lock */
 	struct {
 		struct hlist_node list;
+		u64 destroyed;
 		u32 napi_id;
 		u32 id;
 	} user;
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index 3c9818c1962a..aac840a3849b 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -53,6 +53,8 @@ enum {
 	NETDEV_A_PAGE_POOL_PAD,
 	NETDEV_A_PAGE_POOL_IFINDEX,
 	NETDEV_A_PAGE_POOL_NAPI_ID,
+	NETDEV_A_PAGE_POOL_DESTROYED,
+	NETDEV_A_PAGE_POOL_INFLIGHT,
 
 	__NETDEV_A_PAGE_POOL_MAX,
 	NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index de199c356043..733ca2198d94 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -513,7 +513,7 @@ EXPORT_SYMBOL(page_pool_alloc_pages);
  */
 #define _distance(a, b)	(s32)((a) - (b))
 
-static s32 page_pool_inflight(struct page_pool *pool)
+s32 page_pool_inflight(const struct page_pool *pool)
 {
 	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
 	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
@@ -933,6 +933,7 @@ void page_pool_destroy(struct page_pool *pool)
 	if (!page_pool_release(pool))
 		return;
 
+	page_pool_destroyed(pool);
 	pool->defer_start = jiffies;
 	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;
 
diff --git a/net/core/page_pool_priv.h b/net/core/page_pool_priv.h
index 6c4e4aeed02a..892e0cddc400 100644
--- a/net/core/page_pool_priv.h
+++ b/net/core/page_pool_priv.h
@@ -3,7 +3,10 @@
 #ifndef __PAGE_POOL_PRIV_H
 #define __PAGE_POOL_PRIV_H
 
+s32 page_pool_inflight(const struct page_pool *pool);
+
 int page_pool_list(struct page_pool *pool);
+void page_pool_destroyed(struct page_pool *pool);
 void page_pool_unlist(struct page_pool *pool);
 
 #endif
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index ba2f27f15495..a74d6f18caac 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -126,6 +126,14 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
 	if (pool->user.napi_id &&
 	    nla_put_u32(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id))
 		goto err_cancel;
+	if (pool->user.destroyed) {
+		if (nla_put_u64_64bit(rsp, NETDEV_A_PAGE_POOL_DESTROYED,
+				      pool->user.destroyed,
+				      NETDEV_A_PAGE_POOL_PAD) ||
+		    nla_put_u32(rsp, NETDEV_A_PAGE_POOL_INFLIGHT,
+				page_pool_inflight(pool)))
+			goto err_cancel;
+	}
 
 	genlmsg_end(rsp, hdr);
 
@@ -211,6 +219,14 @@ int page_pool_list(struct page_pool *pool)
 	return err;
 }
 
+void page_pool_destroyed(struct page_pool *pool)
+{
+	mutex_lock(&page_pools_lock);
+	pool->user.destroyed = ktime_get_boottime_seconds();
+	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
+	mutex_unlock(&page_pools_lock);
+}
+
 void page_pool_unlist(struct page_pool *pool)
 {
 	mutex_lock(&page_pools_lock);
-- 
2.41.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ