lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230617121146.716077-3-dhowells@redhat.com>
Date:   Sat, 17 Jun 2023 13:11:31 +0100
From:   David Howells <dhowells@...hat.com>
To:     netdev@...r.kernel.org
Cc:     David Howells <dhowells@...hat.com>,
        Alexander Duyck <alexander.duyck@...il.com>,
        "David S. Miller" <davem@...emloft.net>,
        Eric Dumazet <edumazet@...gle.com>,
        Jakub Kicinski <kuba@...nel.org>,
        Paolo Abeni <pabeni@...hat.com>,
        Willem de Bruijn <willemdebruijn.kernel@...il.com>,
        David Ahern <dsahern@...nel.org>,
        Matthew Wilcox <willy@...radead.org>,
        Jens Axboe <axboe@...nel.dk>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, Menglong Dong <imagedong@...cent.com>
Subject: [PATCH net-next v2 02/17] net: Display info about MSG_SPLICE_PAGES memory handling in proc

Display information in procfs about the memory handling that MSG_SPLICE_PAGES
performs when copying slabbed data into page fragments.

For each CPU that has a cached folio, it displays the folio pfn, the current
offset within the folio, and the size of the folio.

It also displays the number of pages refurbished and the number of pages
replaced.

Signed-off-by: David Howells <dhowells@...hat.com>
cc: Alexander Duyck <alexander.duyck@...il.com>
cc: Eric Dumazet <edumazet@...gle.com>
cc: "David S. Miller" <davem@...emloft.net>
cc: David Ahern <dsahern@...nel.org>
cc: Jakub Kicinski <kuba@...nel.org>
cc: Paolo Abeni <pabeni@...hat.com>
cc: Jens Axboe <axboe@...nel.dk>
cc: Matthew Wilcox <willy@...radead.org>
cc: Menglong Dong <imagedong@...cent.com>
cc: netdev@...r.kernel.org
---
 net/core/skbuff.c | 42 +++++++++++++++++++++++++++++++++++++++---
 1 file changed, 39 insertions(+), 3 deletions(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d962c93a429d..36605510a76d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -83,6 +83,7 @@
 #include <linux/user_namespace.h>
 #include <linux/indirect_call_wrapper.h>
 #include <linux/textsearch.h>
+#include <linux/proc_fs.h>
 
 #include "dev.h"
 #include "sock_destructor.h"
@@ -6758,6 +6759,7 @@ nodefer:	__kfree_skb(skb);
 struct skb_splice_frag_cache {
 	struct folio	*folio;
 	void		*virt;
+	unsigned int	fsize;
 	unsigned int	offset;
 	/* we maintain a pagecount bias, so that we dont dirty cache line
 	 * containing page->_refcount every time we allocate a fragment.
@@ -6767,6 +6769,26 @@ struct skb_splice_frag_cache {
 };
 
 static DEFINE_PER_CPU(struct skb_splice_frag_cache, skb_splice_frag_cache);
+static atomic_t skb_splice_frag_replaced, skb_splice_frag_refurbished;
+
+static int skb_splice_show(struct seq_file *m, void *data)
+{
+	int cpu;
+
+	seq_printf(m, "refurb=%u repl=%u\n",
+		   atomic_read(&skb_splice_frag_refurbished),
+		   atomic_read(&skb_splice_frag_replaced));
+
+	for_each_possible_cpu(cpu) {
+		const struct skb_splice_frag_cache *cache =
+			per_cpu_ptr(&skb_splice_frag_cache, cpu);
+
+		seq_printf(m, "[%u] %lx %u/%u\n",
+			   cpu, folio_pfn(cache->folio),
+			   cache->offset, cache->fsize);
+	}
+	return 0;
+}
 
 /**
  * alloc_skb_frag - Allocate a page fragment for using in a socket
@@ -6803,17 +6825,21 @@ void *alloc_skb_frag(size_t fragsz, gfp_t gfp)
 
 insufficient_space:
 	/* See if we can refurbish the current folio. */
-	if (!folio || !folio_ref_sub_and_test(folio, cache->pagecnt_bias))
+	if (!folio)
 		goto get_new_folio;
+	if (!folio_ref_sub_and_test(folio, cache->pagecnt_bias))
+		goto replace_folio;
 	if (unlikely(cache->pfmemalloc)) {
 		__folio_put(folio);
-		goto get_new_folio;
+		goto replace_folio;
 	}
 
 	fsize = folio_size(folio);
 	if (unlikely(fragsz > fsize))
 		goto frag_too_big;
 
+	atomic_inc(&skb_splice_frag_refurbished);
+
 	/* OK, page count is 0, we can safely set it */
 	folio_set_count(folio, PAGE_FRAG_CACHE_MAX_SIZE + 1);
 
@@ -6822,6 +6848,8 @@ void *alloc_skb_frag(size_t fragsz, gfp_t gfp)
 	offset = fsize;
 	goto try_again;
 
+replace_folio:
+	atomic_inc(&skb_splice_frag_replaced);
 get_new_folio:
 	if (!spare) {
 		cache->folio = NULL;
@@ -6848,6 +6876,7 @@ void *alloc_skb_frag(size_t fragsz, gfp_t gfp)
 
 	cache->folio = spare;
 	cache->virt  = folio_address(spare);
+	cache->fsize = folio_size(spare);
 	folio = spare;
 	spare = NULL;
 
@@ -6858,7 +6887,7 @@ void *alloc_skb_frag(size_t fragsz, gfp_t gfp)
 
 	/* Reset page count bias and offset to start of new frag */
 	cache->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
-	offset = folio_size(folio);
+	offset = cache->fsize;
 	goto try_again;
 
 frag_too_big:
@@ -7007,3 +7036,10 @@ ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
 	return spliced ?: ret;
 }
 EXPORT_SYMBOL(skb_splice_from_iter);
+
+static int skb_splice_init(void)
+{
+	proc_create_single("pagefrags", S_IFREG | 0444, NULL, &skb_splice_show);
+	return 0;
+}
+late_initcall(skb_splice_init);

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ