Message-ID: <20240508133408.54708-9-linyunsheng@huawei.com>
Date: Wed, 8 May 2024 21:34:03 +0800
From: Yunsheng Lin <linyunsheng@...wei.com>
To: <davem@...emloft.net>, <kuba@...nel.org>, <pabeni@...hat.com>
CC: <netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>, Yunsheng Lin
	<linyunsheng@...wei.com>, Alexander Duyck <alexander.duyck@...il.com>, Andrew
 Morton <akpm@...ux-foundation.org>, <linux-mm@...ck.org>
Subject: [PATCH net-next v3 08/13] mm: page_frag: reuse existing space for 'size' and 'pfmemalloc'

Currently there is one 'struct page_frag' in every 'struct
sock' and 'struct task_struct', and we are about to replace
'struct page_frag' with 'struct page_frag_cache' for them.
Before beginning the replacement, we need to ensure that the
size of 'struct page_frag_cache' is not bigger than the size
of 'struct page_frag', as there may be tens of thousands of
'struct sock' and 'struct task_struct' instances in the
system.

By OR'ing the page order and pfmemalloc flag into the lower
bits of 'va', instead of using 'u16' or 'u32' for the page
size and 'u8' for pfmemalloc, we avoid wasting 3 or 5 bytes
of space. And since the page address, pfmemalloc flag and
order are all unchanged for the same page within the same
'page_frag_cache' instance, it makes sense to pack them
together.
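
For example, the encoding works roughly as below (a minimal
user-space sketch of the scheme, with the kernel's GENMASK()/BIT()
macros and PAGE_MASK expanded by hand for a 4K-page system):

	#define ORDER_MASK	0xffUL		/* GENMASK(7, 0) */
	#define PFMEMALLOC_BIT	(1UL << 8)	/* BIT(8) */

	static void *encode(void *va, unsigned long order, int pfmemalloc)
	{
		/* 'va' is PAGE_SIZE-aligned, so bits 0-11 are free */
		return (void *)((unsigned long)va | order |
				(pfmemalloc ? PFMEMALLOC_BIT : 0));
	}

	static unsigned long decode_order(void *encoded)
	{
		return (unsigned long)encoded & ORDER_MASK;
	}

	static void *decode_va(void *encoded)
	{
		/* clear the low 12 bits to recover the page address */
		return (void *)((unsigned long)encoded & ~0xfffUL);
	}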

Also, it is better to replace 'offset' with 'remaining', the
remaining size of the cache in a 'page_frag_cache' instance:
a single 'fragsz > remaining' check then covers the case of
the cache being large enough, which should be the fast path
if we ensure 'remaining' is zero when 'va' == NULL by
memset'ing 'struct page_frag_cache' in page_frag_cache_init()
and page_frag_cache_drain().
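
In other words, the allocation fast path becomes roughly the
below (a simplified sketch, not the exact code in this patch):

	remaining = nc->remaining & align_mask;
	if (likely(fragsz <= remaining)) {
		/* fast path: the current page still has room */
		nc->remaining = remaining - fragsz;
		nc->pagecnt_bias--;
		return encoded_page_address(nc->encoded_va) +
			(page_size - remaining);
	}
	/* slow path: no page yet, page exhausted, or pfmemalloc */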

After this patch, the size of 'struct page_frag_cache' should be
the same as the size of 'struct page_frag'.
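
A compile-time check along the lines of the below (hypothetical,
not included in this patch) could enforce that invariant:

	static_assert(sizeof(struct page_frag_cache) <=
		      sizeof(struct page_frag),
		      "page_frag_cache is bigger than page_frag");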

CC: Alexander Duyck <alexander.duyck@...il.com>
Signed-off-by: Yunsheng Lin <linyunsheng@...wei.com>
---
 include/linux/page_frag_cache.h | 62 +++++++++++++++++----
 mm/page_frag_cache.c            | 98 ++++++++++++++++++++-------------
 2 files changed, 111 insertions(+), 49 deletions(-)

diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index 024ff73a7ea4..88e91ee57b91 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -8,29 +8,67 @@
 #define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
 #define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
 
+/*
+ * struct encoded_va - a nonexistent type marking this pointer
+ *
+ * An 'encoded_va' pointer is a pointer to a virtual address that is aligned
+ * to at least PAGE_SIZE, which means at least the 12 lower bits are
+ * available for other purposes.
+ *
+ * Currently we use the lower 8 bits (bits 0-7) for the order and bit 8 for
+ * the PFMEMALLOC flag of the page this 'va' corresponds to.
+ *
+ * Use the supplied helper functions to encode/decode the pointer and bits.
+ */
+struct encoded_va;
+
+#define PAGE_FRAG_CACHE_ORDER_MASK		GENMASK(7, 0)
+#define PAGE_FRAG_CACHE_PFMEMALLOC_BIT		BIT(8)
+#define PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT	8
+
+static inline struct encoded_va *encode_aligned_va(void *va,
+						   unsigned int order,
+						   bool pfmemalloc)
+{
+	return (struct encoded_va *)((unsigned long)va | order |
+			pfmemalloc << PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT);
+}
+
+static inline unsigned long encoded_page_order(struct encoded_va *encoded_va)
+{
+	return PAGE_FRAG_CACHE_ORDER_MASK & (unsigned long)encoded_va;
+}
+
+static inline bool encoded_page_pfmemalloc(struct encoded_va *encoded_va)
+{
+	return PAGE_FRAG_CACHE_PFMEMALLOC_BIT & (unsigned long)encoded_va;
+}
+
+static inline void *encoded_page_address(struct encoded_va *encoded_va)
+{
+	return (void *)((unsigned long)encoded_va & PAGE_MASK);
+}
+
 struct page_frag_cache {
-	void *va;
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-	__u16 offset;
-	__u16 size;
+	struct encoded_va *encoded_va;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
+	u16 pagecnt_bias;
+	u16 remaining;
 #else
-	__u32 offset;
+	u32 pagecnt_bias;
+	u32 remaining;
 #endif
-	/* we maintain a pagecount bias, so that we dont dirty cache line
-	 * containing page->_refcount every time we allocate a fragment.
-	 */
-	unsigned int		pagecnt_bias;
-	bool pfmemalloc;
 };
 
 static inline void page_frag_cache_init(struct page_frag_cache *nc)
 {
-	nc->va = NULL;
+	memset(nc, 0, sizeof(*nc));
 }
 
 static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
 {
-	return !!nc->pfmemalloc;
+	return encoded_page_pfmemalloc(nc->encoded_va);
 }
 
 void page_frag_cache_drain(struct page_frag_cache *nc);
diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
index c0ecfa733727..4542d72e7b01 100644
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -22,6 +22,7 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
 					     gfp_t gfp_mask)
 {
 	struct page *page = NULL;
+	unsigned int size, order;
 	gfp_t gfp = gfp_mask;
 
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
@@ -32,23 +33,41 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
 		   __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
 				PAGE_FRAG_CACHE_MAX_ORDER);
-	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
 #endif
-	if (unlikely(!page))
+	if (unlikely(!page)) {
 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+		if (unlikely(!page))
+			goto alloc_failed;
+
+		size = PAGE_SIZE;
+		order = 0;
+	} else {
+		size = PAGE_FRAG_CACHE_MAX_SIZE;
+		order = PAGE_FRAG_CACHE_MAX_ORDER;
+	}
 
-	nc->va = page ? page_address(page) : NULL;
+	nc->encoded_va = encode_aligned_va(page_address(page), order,
+					   page_is_pfmemalloc(page));
+	nc->remaining = size;
+	page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
+	nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
 
 	return page;
+
+alloc_failed:
+	nc->encoded_va = NULL;
+	nc->remaining = 0;
+	return NULL;
 }
 
 void page_frag_cache_drain(struct page_frag_cache *nc)
 {
-	if (!nc->va)
+	if (!nc->encoded_va)
 		return;
 
-	__page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias);
-	nc->va = NULL;
+	__page_frag_cache_drain(virt_to_head_page(nc->encoded_va),
+				nc->pagecnt_bias);
+	memset(nc, 0, sizeof(*nc));
 }
 EXPORT_SYMBOL(page_frag_cache_drain);
 
@@ -65,35 +84,32 @@ void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
 				 unsigned int fragsz, gfp_t gfp_mask,
 				 unsigned int align_mask)
 {
-	unsigned int size, offset;
+	unsigned int remaining, page_size;
+	struct encoded_va *encoded_va;
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	unsigned long page_order;
+#endif
 	struct page *page;
 
-	if (unlikely(!nc->va)) {
-refill:
-		page = __page_frag_cache_refill(nc, gfp_mask);
-		if (!page)
-			return NULL;
-
-		/* Even if we own the page, we do not use atomic_set().
-		 * This would break get_page_unless_zero() users.
-		 */
-		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
-
-		/* reset page count bias and offset to start of new frag */
-		nc->pfmemalloc = page_is_pfmemalloc(page);
-		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
-		nc->offset = 0;
-	}
-
+alloc_fragment:
+	remaining = nc->remaining & align_mask;
+	encoded_va = nc->encoded_va;
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-	/* if size can vary use size else just use PAGE_SIZE */
-	size = nc->size;
+	page_order = encoded_page_order(encoded_va);
+	page_size = PAGE_SIZE << page_order;
 #else
-	size = PAGE_SIZE;
+	page_size = PAGE_SIZE;
 #endif
 
-	offset = __ALIGN_KERNEL_MASK(nc->offset, ~align_mask);
-	if (unlikely(offset + fragsz > size)) {
+	if (unlikely(fragsz > remaining)) {
+		if (unlikely(!encoded_va)) {
+			page = __page_frag_cache_refill(nc, gfp_mask);
+			if (page)
+				goto alloc_fragment;
+
+			return NULL;
+		}
+
 		/* fragsz is not supposed to be bigger than PAGE_SIZE as we are
 		 * allowing order 3 page allocation to fail easily under low
 		 * memory condition.
@@ -101,14 +117,22 @@ void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
 		if (WARN_ON_ONCE(fragsz > PAGE_SIZE))
 			return NULL;
 
-		page = virt_to_page(nc->va);
+		page = virt_to_page(encoded_va);
+		if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) {
+			page = __page_frag_cache_refill(nc, gfp_mask);
+			if (page)
+				goto alloc_fragment;
 
-		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
-			goto refill;
+			return NULL;
+		}
 
-		if (unlikely(nc->pfmemalloc)) {
+		if (unlikely(encoded_page_pfmemalloc(encoded_va))) {
 			free_unref_page(page, compound_order(page));
-			goto refill;
+			page = __page_frag_cache_refill(nc, gfp_mask);
+			if (page)
+				goto alloc_fragment;
+
+			return NULL;
 		}
 
 		/* OK, page count is 0, we can safely set it */
@@ -116,13 +140,13 @@ void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
 
 		/* reset page count bias and offset to start of new frag */
 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
-		offset = 0;
+		remaining = page_size;
 	}
 
+	nc->remaining = remaining - fragsz;
 	nc->pagecnt_bias--;
-	nc->offset = offset + fragsz;
 
-	return nc->va + offset;
+	return encoded_page_address(encoded_va) + (page_size - remaining);
 }
 EXPORT_SYMBOL(__page_frag_alloc_va_align);
 
-- 
2.33.0

