Message-ID: <20230127114108.10025-18-joey.gouly@arm.com>
Date: Fri, 27 Jan 2023 11:40:58 +0000
From: Joey Gouly <joey.gouly@....com>
To: Andrew Jones <andrew.jones@...ux.dev>, <kvmarm@...ts.linux.dev>,
<kvm@...r.kernel.org>
CC: <joey.gouly@....com>, Alexandru Elisei <alexandru.elisei@....com>,
Christoffer Dall <christoffer.dall@....com>,
Fuad Tabba <tabba@...gle.com>,
Jean-Philippe Brucker <jean-philippe@...aro.org>,
Joey Gouly <Joey.Gouly@....com>, Marc Zyngier <maz@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Oliver Upton <oliver.upton@...ux.dev>,
Paolo Bonzini <pbonzini@...hat.com>,
Quentin Perret <qperret@...gle.com>,
Steven Price <steven.price@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
"Thomas Huth" <thuth@...hat.com>, Will Deacon <will@...nel.org>,
Zenghui Yu <yuzenghui@...wei.com>,
<linux-coco@...ts.linux.dev>, <kvmarm@...ts.cs.columbia.edu>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-kernel@...r.kernel.org>
Subject: [RFC kvm-unit-tests 17/27] lib/alloc_page: Add shared page allocation support
Add support for allocating pages that can be shared with the host; in
other words, decrypted pages. This is achieved by adding hooks for
setting a memory region as "encrypted" or "decrypted", which
architecture-specific backends can override.

Also add a new flag, FLAG_SHARED, for allocating shared pages, and give
the page allocation/free routines *_shared variants.

These will later be used for Realm support and tests.
Signed-off-by: Joey Gouly <joey.gouly@....com>
---
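For review context, a sketch of how an arch backend could override these
hooks (illustrative only: myarch_set_ipa_state(), MYARCH_IPA_SHARED,
MYARCH_IPA_PRIVATE and the header path are invented, not part of this
patch). The defaults in alloc_page.c are guarded by #ifndef, so an arch
header that alloc_page.c already pulls in can provide real definitions
plus matching #defines:

/* hypothetical lib/myarch/asm/page.h excerpt */
static inline void myarch_set_memory_shared(unsigned long mem, unsigned long size)
{
	unsigned long end = mem + size;

	/* tell the hypervisor that each page is now shared with the host */
	for (; mem < end; mem += PAGE_SIZE)
		myarch_set_ipa_state(mem, MYARCH_IPA_SHARED);
}
#define set_memory_decrypted myarch_set_memory_shared

static inline void myarch_set_memory_private(unsigned long mem, unsigned long size)
{
	unsigned long end = mem + size;

	/* make the pages private (encrypted) again before they are reused */
	for (; mem < end; mem += PAGE_SIZE)
		myarch_set_ipa_state(mem, MYARCH_IPA_PRIVATE);
}
#define set_memory_encrypted myarch_set_memory_private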
lib/alloc_page.c | 36 ++++++++++++++++++++++++++++++++----
lib/alloc_page.h | 24 ++++++++++++++++++++++++
2 files changed, 56 insertions(+), 4 deletions(-)
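A minimal sketch of what a test using the new *_shared variants might
look like (hypothetical, not part of this patch); the comments reflect
the ordering in page_memalign_order_flags() and _free_pages():

#include <libcflat.h>
#include <alloc_page.h>

static void example_shared_page(void)
{
	/*
	 * Allocates a page, calls set_memory_decrypted() on it and only
	 * then zeroes it, so no stale private data leaks to the host.
	 */
	void *buf = alloc_page_shared();

	assert(buf);

	/* ... exchange data with the host through buf ... */

	/* Calls set_memory_encrypted() to make the page private again
	 * before it goes back on the free list. */
	free_page_shared(buf);
}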
diff --git a/lib/alloc_page.c b/lib/alloc_page.c
index 84f01e11..8b811b15 100644
--- a/lib/alloc_page.c
+++ b/lib/alloc_page.c
@@ -53,6 +53,20 @@ static struct mem_area areas[MAX_AREAS];
/* Mask of initialized areas */
static unsigned int areas_mask;
-/* Protects areas and areas mask */
+
+/* Arch hooks to mark memory encrypted (private) or decrypted (shared) */
+#ifndef set_memory_encrypted
+static inline void set_memory_encrypted(unsigned long mem, unsigned long size)
+{
+}
+#endif
+
+#ifndef set_memory_decrypted
+static inline void set_memory_decrypted(unsigned long mem, unsigned long size)
+{
+}
+#endif
+
+/* Protects areas and areas mask */
static struct spinlock lock;
bool page_alloc_initialized(void)
@@ -263,7 +277,7 @@ static bool coalesce(struct mem_area *a, u8 order, pfn_t pfn, pfn_t pfn2)
* - no pages in the memory block were already free
* - no pages in the memory block are special
*/
-static void _free_pages(void *mem)
+static void _free_pages(void *mem, u32 flags)
{
pfn_t pfn2, pfn = virt_to_pfn(mem);
struct mem_area *a = NULL;
@@ -281,6 +295,9 @@ static void _free_pages(void *mem)
p = pfn - a->base;
order = a->page_states[p] & ORDER_MASK;
+ if (flags & FLAG_SHARED)
+ set_memory_encrypted((unsigned long)mem, BIT(order) * PAGE_SIZE);
+
/* ensure that the first page is allocated and not special */
assert(IS_ALLOCATED(a->page_states[p]));
/* ensure that the order has a sane value */
@@ -320,7 +337,14 @@ static void _free_pages(void *mem)
void free_pages(void *mem)
{
{
spin_lock(&lock);
- _free_pages(mem);
+ _free_pages(mem, 0);
+ spin_unlock(&lock);
+}
+
+void free_pages_shared(void *mem)
+{
+ spin_lock(&lock);
+ _free_pages(mem, FLAG_SHARED);
spin_unlock(&lock);
}
@@ -353,7 +377,7 @@ static void _unreserve_one_page(pfn_t pfn)
i = pfn - a->base;
assert(a->page_states[i] == STATUS_SPECIAL);
a->page_states[i] = STATUS_ALLOCATED;
- _free_pages(pfn_to_virt(pfn));
+ _free_pages(pfn_to_virt(pfn), 0);
}
int reserve_pages(phys_addr_t addr, size_t n)
@@ -401,6 +425,10 @@ static void *page_memalign_order_flags(u8 al, u8 ord, u32 flags)
if (area & BIT(i))
res = page_memalign_order(areas + i, al, ord, fresh);
spin_unlock(&lock);
+
+ if (res && (flags & FLAG_SHARED))
+ set_memory_decrypted((unsigned long)res, BIT(ord) * PAGE_SIZE);
+
if (res && !(flags & FLAG_DONTZERO))
memset(res, 0, BIT(ord) * PAGE_SIZE);
return res;
diff --git a/lib/alloc_page.h b/lib/alloc_page.h
index 060e0418..847a7fda 100644
--- a/lib/alloc_page.h
+++ b/lib/alloc_page.h
@@ -21,6 +21,7 @@
#define FLAG_DONTZERO 0x10000
#define FLAG_FRESH 0x20000
+#define FLAG_SHARED 0x40000
/* Returns true if the page allocator has been initialized */
bool page_alloc_initialized(void);
@@ -121,4 +122,27 @@ int reserve_pages(phys_addr_t addr, size_t npages);
*/
void unreserve_pages(phys_addr_t addr, size_t npages);
+/* Shared page operations */
+static inline void *alloc_pages_shared(unsigned long order)
+{
+ return alloc_pages_flags(order, FLAG_SHARED);
+}
+
+static inline void *alloc_page_shared(void)
+{
+ return alloc_pages_shared(0);
+}
+
+void free_pages_shared(void *mem);
+
+static inline void free_page_shared(void *page)
+{
+ free_pages_shared(page);
+}
+
+static inline void free_pages_shared_by_order(void *mem, unsigned long order)
+{
+ free_pages_shared(mem);
+}
+
#endif
--
2.17.1