Message-Id: <20231016233215.13090-6-madvenka@linux.microsoft.com>
Date:   Mon, 16 Oct 2023 18:32:10 -0500
From:   madvenka@...ux.microsoft.com
To:     gregkh@...uxfoundation.org, pbonzini@...hat.com, rppt@...nel.org,
        jgowans@...zon.com, graf@...zon.de, arnd@...db.de,
        keescook@...omium.org, stanislav.kinsburskii@...il.com,
        anthony.yznaga@...cle.com, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, madvenka@...ux.microsoft.com,
        jamorris@...ux.microsoft.com
Subject: [RFC PATCH v1 05/10] mm/prmem: Implement a buffer allocator for persistent memory

From: "Madhavan T. Venkataraman" <madvenka@...ux.microsoft.com>

Implement functions that allocate and free buffers smaller than a page
size:

	- prmem_alloc()
	- prmem_free()

These functions are analogous to kmalloc() and kfree(). However, the only
GFP flag that is honored is __GFP_ZERO, which zeroes out the allocated
memory.
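
As a caller-side sketch (struct foo_state and the helpers around it are
illustrative, not part of this patch), an allocation and its matching
free would look like this:

	struct foo_state {
		unsigned long	generation;
		unsigned long	flags;
	};

	static struct foo_state *foo_alloc(void)
	{
		/* Zeroed on success; __GFP_ZERO is the only flag honored. */
		return prmem_alloc(sizeof(struct foo_state),
				   GFP_KERNEL | __GFP_ZERO);
	}

	static void foo_release(struct foo_state *state)
	{
		/* The same size must be passed back to prmem_free(). */
		prmem_free(state, sizeof(struct foo_state));
	}

Note that prmem_free() takes the allocation size: the allocator stores no
per-object header, so the caller must remember how much it asked for.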

To keep the implementation simple, create allocation caches for
power-of-two object sizes:

	8, 16, 32, 64, ..., PAGE_SIZE

A given request is served from the smallest cache whose object size fits
it, as illustrated below. This size-class idea is borrowed from the kmem
allocator.
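
For example, with the power-of-two caches above, requests map as follows
(each allocation is rounded up to the object size of its cache):

	request size		cache used
	1 .. 8			8
	24			32
	100			128
	PAGE_SIZE		PAGE_SIZE

Anything larger than PAGE_SIZE is rejected; callers should use
prmem_alloc_pages() for such allocations.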

To fill the cache of a given size, allocate a page, break it up into
equal-sized objects and add the objects to the cache (see the free-list
sketch below). This is a deliberately simple allocator: it does not
attempt sophisticated techniques such as cache coloring, or coalescing
the free objects of a page so that the page itself can be freed.
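
For reference, each cache is an intrusive singly linked free list: the
first word of every free object stores the pointer to the next free
object, so no extra metadata is needed. A minimal sketch of the push/pop
steps that prmem_free_locked() and prmem_alloc_locked() perform (helper
names are illustrative):

	/* Push a free object onto a cache's free list. */
	static void cache_push(void **cache, void *obj)
	{
		*((void **) obj) = *cache;	/* obj->next = old head */
		*cache = obj;			/* new head = obj */
	}

	/* Pop the head object, or NULL if the cache is empty. */
	static void *cache_pop(void **cache)
	{
		void *obj = *cache;

		if (obj)
			*cache = *((void **) obj);	/* head = obj->next */
		return obj;
	}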

Signed-off-by: Madhavan T. Venkataraman <madvenka@...ux.microsoft.com>
---
 include/linux/prmem.h          |  12 ++++
 kernel/prmem/prmem_allocator.c | 112 ++++++++++++++++++++++++++++++++-
 2 files changed, 123 insertions(+), 1 deletion(-)

diff --git a/include/linux/prmem.h b/include/linux/prmem.h
index 108683933c82..1cb4660cf35e 100644
--- a/include/linux/prmem.h
+++ b/include/linux/prmem.h
@@ -50,6 +50,8 @@ struct prmem_region {
 	struct gen_pool_chunk	*chunk;
 };
 
+#define PRMEM_MAX_CACHES	14
+
 /*
  * PRMEM metadata.
  *
@@ -60,6 +62,9 @@ struct prmem_region {
  * size		Size of initial memory allocated to prmem.
  *
  * regions	List of memory regions.
+ *
+ * caches	Caches for different object sizes. For allocations smaller than
+ *		PAGE_SIZE, these caches are used.
  */
 struct prmem {
 	unsigned long		checksum;
@@ -68,6 +73,9 @@ struct prmem {
 
 	/* Persistent Regions. */
 	struct list_head	regions;
+
+	/* Allocation caches. */
+	void			*caches[PRMEM_MAX_CACHES];
 };
 
 extern struct prmem		*prmem;
@@ -87,6 +95,8 @@ int  prmem_cmdline_size(void);
 /* Allocator API. */
 struct page *prmem_alloc_pages(unsigned int order, gfp_t gfp);
 void prmem_free_pages(struct page *pages, unsigned int order);
+void *prmem_alloc(size_t size, gfp_t gfp);
+void prmem_free(void *va, size_t size);
 
 /* Internal functions. */
 struct prmem_region *prmem_add_region(unsigned long pa, size_t size);
@@ -95,6 +105,8 @@ void *prmem_alloc_pool(struct prmem_region *region, size_t size, int align);
 void prmem_free_pool(struct prmem_region *region, void *va, size_t size);
 void *prmem_alloc_pages_locked(unsigned int order);
 void prmem_free_pages_locked(void *va, unsigned int order);
+void *prmem_alloc_locked(size_t size);
+void prmem_free_locked(void *va, size_t size);
 unsigned long prmem_checksum(void *start, size_t size);
 bool __init prmem_validate(void);
 void prmem_cmdline(char *cmdline);
diff --git a/kernel/prmem/prmem_allocator.c b/kernel/prmem/prmem_allocator.c
index 07a5a430630c..f12975bc6777 100644
--- a/kernel/prmem/prmem_allocator.c
+++ b/kernel/prmem/prmem_allocator.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Persistent-Across-Kexec memory feature (prmem) - Allocator.
+ * Persistent-Across-Kexec memory (prmem) - Allocator.
  *
  * Copyright (C) 2023 Microsoft Corporation
  * Author: Madhavan T. Venkataraman (madvenka@...ux.microsoft.com)
@@ -72,3 +72,113 @@ void prmem_free_pages(struct page *pages, unsigned int order)
 	spin_unlock(&prmem_lock);
 }
 EXPORT_SYMBOL_GPL(prmem_free_pages);
+
+/* Buffer allocation functions. */
+
+#if PAGE_SIZE > 65536
+#error "Page size is too big"
+#endif
+
+static size_t	prmem_cache_sizes[PRMEM_MAX_CACHES] = {
+	8, 16, 32, 64, 128, 256, 512,
+	1024, 2048, 4096, 8192, 16384, 32768, 65536,
+};
+
+static int prmem_cache_index(size_t size)
+{
+	int	i;
+
+	for (i = 0; i < PRMEM_MAX_CACHES; i++) {
+		if (size <= prmem_cache_sizes[i])
+			return i;
+	}
+	BUG();
+}
+
+static void prmem_refill(void **cache, size_t size)
+{
+	void		*va;
+	int		i, n = PAGE_SIZE / size;
+
+	/* Allocate a page. */
+	va = prmem_alloc_pages_locked(0);
+	if (!va)
+		return;
+
+	/* Break up the page into pieces and put them in the cache. */
+	for (i = 0; i < n; i++, va += size) {
+		*((void **) va) = *cache;
+		*cache = va;
+	}
+}
+
+void *prmem_alloc_locked(size_t size)
+{
+	void		*va;
+	int		index;
+	void		**cache;
+
+	index = prmem_cache_index(size);
+	size = prmem_cache_sizes[index];
+
+	cache = &prmem->caches[index];
+	if (!*cache) {
+		/* Refill the cache. */
+		prmem_refill(cache, size);
+	}
+
+	/* Allocate one from the cache. */
+	va = *cache;
+	if (va)
+		*cache = *((void **) va);
+	return va;
+}
+
+void *prmem_alloc(size_t size, gfp_t gfp)
+{
+	void		*va;
+	bool		zero = !!(gfp & __GFP_ZERO);
+
+	if (!prmem_inited || !size)
+		return NULL;
+
+	/* This function is only for sizes up to a PAGE_SIZE. */
+	if (size > PAGE_SIZE)
+		return NULL;
+
+	spin_lock(&prmem_lock);
+	va = prmem_alloc_locked(size);
+	spin_unlock(&prmem_lock);
+
+	if (va && zero)
+		memset(va, 0, size);
+	return va;
+}
+EXPORT_SYMBOL_GPL(prmem_alloc);
+
+void prmem_free_locked(void *va, size_t size)
+{
+	int		index;
+	void		**cache;
+
+	/* Free the object into its cache. */
+	index = prmem_cache_index(size);
+	cache = &prmem->caches[index];
+	*((void **) va) = *cache;
+	*cache = va;
+}
+
+void prmem_free(void *va, size_t size)
+{
+	if (!prmem_inited || !va || !size)
+		return;
+
+	/* This function is only for sizes up to a PAGE_SIZE. */
+	if (size > PAGE_SIZE)
+		return;
+
+	spin_lock(&prmem_lock);
+	prmem_free_locked(va, size);
+	spin_unlock(&prmem_lock);
+}
+EXPORT_SYMBOL_GPL(prmem_free);
-- 
2.25.1
