lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20231016233215.13090-5-madvenka@linux.microsoft.com>
Date:   Mon, 16 Oct 2023 18:32:09 -0500
From:   madvenka@...ux.microsoft.com
To:     gregkh@...uxfoundation.org, pbonzini@...hat.com, rppt@...nel.org,
        jgowans@...zon.com, graf@...zon.de, arnd@...db.de,
        keescook@...omium.org, stanislav.kinsburskii@...il.com,
        anthony.yznaga@...cle.com, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, madvenka@...ux.microsoft.com,
        jamorris@...ux.microsoft.com
Subject: [RFC PATCH v1 04/10] mm/prmem: Implement a page allocator for persistent memory

From: "Madhavan T. Venkataraman" <madvenka@...ux.microsoft.com>

Define the following convenience wrapper functions for allocating and
freeing pages:

	- prmem_alloc_pages()
	- prmem_free_pages()

The functions are analogous to alloc_pages() and __free_pages(). However,
the only GFP flag that is honored is __GFP_ZERO, which zeroes the
allocated memory; all other flags are ignored.

Signed-off-by: Madhavan T. Venkataraman <madvenka@...ux.microsoft.com>
---
 include/linux/prmem.h          |  7 ++++
 kernel/prmem/Makefile          |  1 +
 kernel/prmem/prmem_allocator.c | 74 ++++++++++++++++++++++++++++++++++
 kernel/prmem/prmem_init.c      |  2 +
 4 files changed, 84 insertions(+)
 create mode 100644 kernel/prmem/prmem_allocator.c

diff --git a/include/linux/prmem.h b/include/linux/prmem.h
index f43f5b0d2b9c..108683933c82 100644
--- a/include/linux/prmem.h
+++ b/include/linux/prmem.h
@@ -75,6 +75,7 @@ extern unsigned long		prmem_metadata;
 extern unsigned long		prmem_pa;
 extern size_t			prmem_size;
 extern bool			prmem_inited;
+extern spinlock_t		prmem_lock;
 
 /* Kernel API. */
 void prmem_reserve_early(void);
@@ -83,11 +84,17 @@ void prmem_init(void);
 void prmem_fini(void);
 int  prmem_cmdline_size(void);
 
+/* Allocator API. */
+struct page *prmem_alloc_pages(unsigned int order, gfp_t gfp);
+void prmem_free_pages(struct page *pages, unsigned int order);
+
 /* Internal functions. */
 struct prmem_region *prmem_add_region(unsigned long pa, size_t size);
 bool prmem_create_pool(struct prmem_region *region, bool new_region);
 void *prmem_alloc_pool(struct prmem_region *region, size_t size, int align);
 void prmem_free_pool(struct prmem_region *region, void *va, size_t size);
+void *prmem_alloc_pages_locked(unsigned int order);
+void prmem_free_pages_locked(void *va, unsigned int order);
 unsigned long prmem_checksum(void *start, size_t size);
 bool __init prmem_validate(void);
 void prmem_cmdline(char *cmdline);
diff --git a/kernel/prmem/Makefile b/kernel/prmem/Makefile
index 9b0a693bfee1..99bb19f0afd3 100644
--- a/kernel/prmem/Makefile
+++ b/kernel/prmem/Makefile
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-y += prmem_parse.o prmem_reserve.o prmem_init.o prmem_region.o prmem_misc.o
+obj-y += prmem_allocator.o
diff --git a/kernel/prmem/prmem_allocator.c b/kernel/prmem/prmem_allocator.c
new file mode 100644
index 000000000000..07a5a430630c
--- /dev/null
+++ b/kernel/prmem/prmem_allocator.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Persistent-Across-Kexec memory feature (prmem) - Allocator.
+ *
+ * Copyright (C) 2023 Microsoft Corporation
+ * Author: Madhavan T. Venkataraman (madvenka@...ux.microsoft.com)
+ */
+#include <linux/prmem.h>
+
+/* Page Allocation functions. */
+
+/*
+ * Allocate a block of 2^order pages from the first region that can
+ * satisfy it (first-fit scan over all persistent regions).
+ *
+ * The block is naturally aligned to its own size: @size is passed to
+ * prmem_alloc_pool() as both the size and the alignment.
+ *
+ * Returns the virtual address of the block, or NULL if no region has room.
+ *
+ * Caller must hold prmem_lock.
+ */
+void *prmem_alloc_pages_locked(unsigned int order)
+{
+	struct prmem_region	*region;
+	void			*va;
+	size_t			size = (1UL << order) << PAGE_SHIFT;
+
+	list_for_each_entry(region, &prmem->regions, node) {
+		va = prmem_alloc_pool(region, size, size);
+		if (va)
+			return va;
+	}
+	return NULL;
+}
+
+/*
+ * Allocate 2^order physically contiguous persistent pages.
+ *
+ * Analogous to alloc_pages(), but __GFP_ZERO is the only gfp flag that is
+ * acted upon; all other flags are ignored.
+ *
+ * Returns the first struct page of the block, or NULL if prmem is not
+ * initialized, the order is out of range, or no region can satisfy the
+ * request.
+ */
+struct page *prmem_alloc_pages(unsigned int order, gfp_t gfp)
+{
+	void		*va;
+	size_t		size = (1UL << order) << PAGE_SHIFT;
+	bool		zero = !!(gfp & __GFP_ZERO);
+
+	/*
+	 * NOTE(review): "order > MAX_ORDER" treats MAX_ORDER as an inclusive
+	 * upper bound (true since v6.4, commit 23baf831a32c); confirm against
+	 * the target kernel version.
+	 */
+	if (!prmem_inited || order > MAX_ORDER)
+		return NULL;
+
+	/*
+	 * NOTE(review): plain spin_lock assumes this is never called from
+	 * IRQ context — verify, else spin_lock_irqsave() is needed.
+	 */
+	spin_lock(&prmem_lock);
+	va = prmem_alloc_pages_locked(order);
+	spin_unlock(&prmem_lock);
+
+	/* Zero outside the lock to keep the critical section short. */
+	if (va) {
+		if (zero)
+			memset(va, 0, size);
+		return virt_to_page(va);
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(prmem_alloc_pages);
+
+/*
+ * Return a block of 2^order pages to the region that contains it.
+ *
+ * The owning region is found by a bounds check: the struct prmem_region
+ * sits at the base of the region's memory, so the region spans
+ * [region_va, region_va + region->size).  If no region fully contains
+ * [va, va + size) — e.g. a bogus pointer — the call is silently ignored.
+ *
+ * Caller must hold prmem_lock.
+ */
+void prmem_free_pages_locked(void *va, unsigned int order)
+{
+	struct prmem_region	*region;
+	size_t			size = (1UL << order) << PAGE_SHIFT;
+	void			*eva = va + size;
+	void			*region_va;
+
+	list_for_each_entry(region, &prmem->regions, node) {
+		/* The region structure is at the base of the region memory. */
+		region_va = region;
+		if (va >= region_va && eva <= (region_va + region->size)) {
+			prmem_free_pool(region, va, size);
+			return;
+		}
+	}
+}
+
+/*
+ * Free pages previously obtained from prmem_alloc_pages().
+ *
+ * Counterpart of __free_pages(), minus refcounting: the whole 2^order
+ * block is returned at once.  A no-op if prmem is not initialized or the
+ * order is out of range.
+ */
+void prmem_free_pages(struct page *pages, unsigned int order)
+{
+	if (!prmem_inited || order > MAX_ORDER)
+		return;
+
+	spin_lock(&prmem_lock);
+	prmem_free_pages_locked(page_to_virt(pages), order);
+	spin_unlock(&prmem_lock);
+}
+EXPORT_SYMBOL_GPL(prmem_free_pages);
diff --git a/kernel/prmem/prmem_init.c b/kernel/prmem/prmem_init.c
index 56df1e6d3ebc..d23833d296fe 100644
--- a/kernel/prmem/prmem_init.c
+++ b/kernel/prmem/prmem_init.c
@@ -9,6 +9,8 @@
 
 bool			prmem_inited;
 
+DEFINE_SPINLOCK(prmem_lock);
+
 void __init prmem_init(void)
 {
 	if (!prmem)
-- 
2.25.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ