Message-ID: <20240407130850.19625-10-linyunsheng@huawei.com>
Date: Sun, 7 Apr 2024 21:08:46 +0800
From: Yunsheng Lin <linyunsheng@...wei.com>
To: <davem@...emloft.net>, <kuba@...nel.org>, <pabeni@...hat.com>
CC: <netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>, Yunsheng Lin
	<linyunsheng@...wei.com>, Andrew Morton <akpm@...ux-foundation.org>,
	<linux-mm@...ck.org>
Subject: [PATCH net-next v1 09/12] mm: page_frag: introduce prepare/commit API for page_frag

There are many use cases that need a minimum amount of
memory in order to make forward progress, but can do better
if more memory is available.

Currently the skb_page_frag_refill() API is used to handle
the above use cases. As mentioned in [1], its implementation
is similar to the one in the mm subsystem.

To unify those two page_frag implementations, introduce a
prepare API that ensures the minimum memory requirement is
satisfied and reports back how much memory is actually
available to the caller.

The caller can then decide how much memory to use by
calling the commit API, or skip the commit API entirely if
it decides not to use any memory.
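
For illustration, a minimal caller sketch of the
prepare/commit flow (a hypothetical caller, not part of this
patch; it assumes the prepare/commit helpers introduced
below):

/* Hypothetical caller: needs at least min_len bytes, uses as much
 * of the prepared space as the source data allows, and commits only
 * the bytes actually written.
 */
static void *example_frag_copy(struct page_frag_cache *nc, const void *src,
			       unsigned int src_len, unsigned int min_len)
{
	unsigned int offset, size = min_len;
	unsigned int used;
	void *va;

	/* On success, size is updated to the space actually available. */
	va = page_frag_alloc_va_prepare(nc, &offset, &size, GFP_KERNEL);
	if (unlikely(!va))
		return NULL;

	used = min(size, src_len);
	memcpy(va, src, used);

	/* Consume one page reference and advance the cache offset. */
	page_frag_alloc_commit(nc, offset, used);

	return va;
}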

Note that it is hard to decide which header files to
include for calling virt_to_page() in an inline helper, so a
macro is used instead of an inline helper to avoid dealing
with that.
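
As a further hypothetical sketch (not part of this patch),
the page-based prepare macro can be used when the caller
wants a struct page plus an offset rather than a virtual
address; virt_to_page() is then resolved at the call site:

/* Hypothetical caller: prepare at least 'needed' bytes and get back
 * the page and offset describing the prepared space.
 */
static struct page *example_pg_refill(struct page_frag_cache *nc,
				      unsigned int needed,
				      unsigned int *offset)
{
	unsigned int size = needed;
	struct page *page;

	/* On success, *offset and size describe the prepared space. */
	page = page_frag_alloc_pg_prepare(nc, offset, &size, GFP_ATOMIC);
	if (unlikely(!page))
		return NULL;

	/* Commit only the minimum that this caller actually consumes. */
	page_frag_alloc_commit(nc, *offset, needed);

	return page;
}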

1. https://lore.kernel.org/all/20240228093013.8263-1-linyunsheng@huawei.com/

Signed-off-by: Yunsheng Lin <linyunsheng@...wei.com>
---
 include/linux/page_frag_cache.h | 141 +++++++++++++++++++++++++++++++-
 mm/page_frag_cache.c            |  13 ++-
 2 files changed, 144 insertions(+), 10 deletions(-)

diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index a97a1ac017d6..28185969cd2c 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -43,8 +43,25 @@ static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
 
 void page_frag_cache_drain(struct page_frag_cache *nc);
 void __page_frag_cache_drain(struct page *page, unsigned int count);
-void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
-			 gfp_t gfp_mask);
+void *page_frag_cache_refill(struct page_frag_cache *nc, unsigned int fragsz,
+			     gfp_t gfp_mask);
+
+static inline void *page_frag_alloc_va(struct page_frag_cache *nc,
+				       unsigned int fragsz, gfp_t gfp_mask)
+{
+	unsigned int offset;
+	void *va;
+
+	va = page_frag_cache_refill(nc, fragsz, gfp_mask);
+	if (unlikely(!va))
+		return NULL;
+
+	offset = nc->offset;
+	nc->pagecnt_bias--;
+	nc->offset = offset + fragsz;
+
+	return va + offset;
+}
 
 static inline void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
 					       unsigned int fragsz,
@@ -69,6 +86,126 @@ static inline void *page_frag_alloc_va_align(struct page_frag_cache *nc,
 	return __page_frag_alloc_va_align(nc, fragsz, gfp_mask, align);
 }
 
+static inline void *page_frag_alloc_va_prepare(struct page_frag_cache *nc,
+					       unsigned int *offset,
+					       unsigned int *size,
+					       gfp_t gfp_mask)
+{
+	void *va;
+
+	va = page_frag_cache_refill(nc, *size, gfp_mask);
+	if (unlikely(!va))
+		return NULL;
+
+	*offset = nc->offset;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	*size = nc->size_mask - *offset + 1;
+#else
+	*size = PAGE_SIZE - *offset;
+#endif
+
+	return va + *offset;
+}
+
+static inline void *page_frag_alloc_va_prepare_align(struct page_frag_cache *nc,
+						     unsigned int *offset,
+						     unsigned int *size,
+						     unsigned int align,
+						     gfp_t gfp_mask)
+{
+	WARN_ON_ONCE(!is_power_of_2(align) || align >= PAGE_SIZE ||
+		     *size < sizeof(unsigned int));
+
+	*offset = nc->offset;
+	nc->offset = ALIGN(*offset, align);
+	return page_frag_alloc_va_prepare(nc, offset, size, gfp_mask);
+}
+
+static inline void *__page_frag_alloc_pg_prepare(struct page_frag_cache *nc,
+						 unsigned int *offset,
+						 unsigned int *size,
+						 gfp_t gfp_mask)
+{
+	void *va;
+
+	va = page_frag_cache_refill(nc, *size, gfp_mask);
+	if (unlikely(!va))
+		return NULL;
+
+	*offset = nc->offset;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	*size = nc->size_mask - *offset + 1;
+#else
+	*size = PAGE_SIZE - *offset;
+#endif
+
+	return va;
+}
+
+#define page_frag_alloc_pg_prepare(nc, offset, size, gfp)		\
+({									\
+	struct page *__page = NULL;					\
+	void *__va;							\
+									\
+	__va = __page_frag_alloc_pg_prepare(nc, offset, size, gfp);	\
+	if (likely(__va))						\
+		__page = virt_to_page(__va);				\
+									\
+	__page;								\
+})
+
+static inline void *__page_frag_alloc_prepare(struct page_frag_cache *nc,
+					      unsigned int *offset,
+					      unsigned int *size,
+					      void **va, gfp_t gfp_mask)
+{
+	void *nc_va;
+
+	nc_va = page_frag_cache_refill(nc, *size, gfp_mask);
+	if (unlikely(!nc_va))
+		return NULL;
+
+	*offset = nc->offset;
+	*va = nc_va + *offset;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	*size = nc->size_mask - *offset + 1;
+#else
+	*size = PAGE_SIZE - *offset;
+#endif
+
+	return nc_va;
+}
+
+#define page_frag_alloc_prepare(nc, offset, size, va, gfp)		\
+({									\
+	struct page *__page = NULL;					\
+	void *__va;							\
+									\
+	__va = __page_frag_alloc_prepare(nc, offset, size, va, gfp);	\
+	if (likely(__va))						\
+		__page = virt_to_page(__va);				\
+									\
+	__page;								\
+})
+
+static inline void page_frag_alloc_commit(struct page_frag_cache *nc,
+					  unsigned int offset,
+					  unsigned int size)
+{
+	nc->pagecnt_bias--;
+	nc->offset = offset + size;
+}
+
+static inline void page_frag_alloc_commit_noref(struct page_frag_cache *nc,
+						unsigned int offset,
+						unsigned int size)
+{
+	nc->offset = offset + size;
+}
+
 void page_frag_free_va(void *addr);
 
 #endif
diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
index ae1393d0619a..cbd0ed82a596 100644
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -81,8 +81,8 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
-void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
-			 gfp_t gfp_mask)
+void *page_frag_cache_refill(struct page_frag_cache *nc, unsigned int fragsz,
+			     gfp_t gfp_mask)
 {
 	unsigned long size_mask;
 	unsigned int offset;
@@ -120,7 +120,7 @@ void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
 		set_page_count(page, size_mask);
 		nc->pagecnt_bias |= size_mask;
 
-		offset = 0;
+		nc->offset = 0;
 		if (unlikely(fragsz > (size_mask + 1))) {
 			/*
 			 * The caller is trying to allocate a fragment
@@ -135,12 +135,9 @@ void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
 		}
 	}
 
-	nc->pagecnt_bias--;
-	nc->offset = offset + fragsz;
-
-	return va + offset;
+	return va;
 }
-EXPORT_SYMBOL(page_frag_alloc_va);
+EXPORT_SYMBOL(page_frag_cache_refill);
 
 /*
  * Frees a page fragment allocated out of either a compound or order 0 page.
-- 
2.33.0

