Message-ID: <20230127114108.10025-20-joey.gouly@arm.com>
Date: Fri, 27 Jan 2023 11:41:00 +0000
From: Joey Gouly <joey.gouly@....com>
To: Andrew Jones <andrew.jones@...ux.dev>, <kvmarm@...ts.linux.dev>,
<kvm@...r.kernel.org>
CC: <joey.gouly@....com>, Alexandru Elisei <alexandru.elisei@....com>,
Christoffer Dall <christoffer.dall@....com>,
Fuad Tabba <tabba@...gle.com>,
Jean-Philippe Brucker <jean-philippe@...aro.org>,
Joey Gouly <Joey.Gouly@....com>, Marc Zyngier <maz@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Oliver Upton <oliver.upton@...ux.dev>,
Paolo Bonzini <pbonzini@...hat.com>,
Quentin Perret <qperret@...gle.com>,
Steven Price <steven.price@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
"Thomas Huth" <thuth@...hat.com>, Will Deacon <will@...nel.org>,
Zenghui Yu <yuzenghui@...wei.com>,
<linux-coco@...ts.linux.dev>, <kvmarm@...ts.cs.columbia.edu>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-kernel@...r.kernel.org>
Subject: [RFC kvm-unit-tests 19/27] arm: realm: Enable memory encryption
From: Suzuki K Poulose <suzuki.poulose@....com>
Enable memory encryption support for Realms.
When a page is "decrypted", we set the RIPAS to EMPTY, hinting to the hypervisor
that it could reclaim the page backing the IPA. The page table is also updated
with the PTE_NS_SHARED attribute, which in effect turns the "IPA" into the
unprotected alias.
Similarly, for "encryption" we mark the IPA back as RIPAS_RAM and clear the
PTE_NS_SHARED attribute.
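As an illustration, a Realm test could use the new helpers roughly as below.
This is only a minimal sketch: alloc_page()/free_page() are the existing
kvm-unit-tests page allocator helpers, and the surrounding test wiring is
hypothetical, not part of this patch.

  /*
   * Illustrative sketch: share one page with the host and make it
   * private again. Not part of this series.
   */
  #include <alloc_page.h>
  #include <asm/io.h>
  #include <asm/page.h>

  static void share_scratch_page_with_host(void)
  {
  	void *page = alloc_page();

  	/* RIPAS -> EMPTY, PTE gains PTE_NS_SHARED: host side may access it */
  	set_memory_decrypted((unsigned long)page, PAGE_SIZE);

  	/* ... exchange data with the host via the unprotected alias ... */

  	/* RIPAS -> RAM, PTE_NS_SHARED cleared: the page is private again */
  	set_memory_encrypted((unsigned long)page, PAGE_SIZE);

  	free_page(page);
  }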
Signed-off-by: Suzuki K Poulose <suzuki.poulose@....com>
Signed-off-by: Joey Gouly <joey.gouly@....com>
---
lib/arm/mmu.c | 65 ++++++++++++++++++++++++++++++++++++++++++++--
lib/arm64/asm/io.h | 6 +++++
2 files changed, 69 insertions(+), 2 deletions(-)
diff --git a/lib/arm/mmu.c b/lib/arm/mmu.c
index 2b5a7141..d4fbe56a 100644
--- a/lib/arm/mmu.c
+++ b/lib/arm/mmu.c
@@ -22,6 +22,7 @@
#include <linux/compiler.h>
pgd_t *mmu_idmap;
+unsigned long idmap_end;
/* Used by Realms, depends on IPA size */
unsigned long prot_ns_shared = 0;
@@ -30,6 +31,11 @@ unsigned long phys_mask_shift = 48;
/* CPU 0 starts with disabled MMU */
static cpumask_t mmu_enabled_cpumask;
+static bool is_idmap_address(phys_addr_t pa)
+{
+ return pa < idmap_end;
+}
+
bool mmu_enabled(void)
{
/*
@@ -92,12 +98,17 @@ static pteval_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
return &pte_val(*pte);
}
+static void set_pte(uintptr_t vaddr, pteval_t *p_pte, pteval_t pte)
+{
+ WRITE_ONCE(*p_pte, pte);
+ flush_tlb_page(vaddr);
+}
+
static pteval_t *install_pte(pgd_t *pgtable, uintptr_t vaddr, pteval_t pte)
{
pteval_t *p_pte = get_pte(pgtable, vaddr);
- WRITE_ONCE(*p_pte, pte);
- flush_tlb_page(vaddr);
+ set_pte(vaddr, p_pte, pte);
return p_pte;
}
@@ -122,6 +133,39 @@ phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *mem)
+ ((ulong)mem & (PAGE_SIZE - 1));
}
+/*
+ * __idmap_set_range_prot - Apply permissions to the given idmap range.
+ */
+static void __idmap_set_range_prot(unsigned long virt_offset, size_t size, pgprot_t prot)
+{
+ pteval_t *ptep;
+ pteval_t default_prot = PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
+
+ while (size > 0) {
+ pteval_t pte = virt_offset | default_prot | pgprot_val(prot);
+
+ if (!is_idmap_address(virt_offset))
+ break;
+ /* Break before make : Clear the PTE entry first */
+ ptep = install_pte(mmu_idmap, (uintptr_t)virt_offset, 0);
+ /* Now apply the changes */
+ set_pte((uintptr_t)virt_offset, ptep, pte);
+
+ size -= PAGE_SIZE;
+ virt_offset += PAGE_SIZE;
+ }
+}
+
+static void idmap_set_range_shared(unsigned long virt_offset, size_t size)
+{
+ return __idmap_set_range_prot(virt_offset, size, __pgprot(PTE_WBWA | PTE_USER | PTE_NS_SHARED));
+}
+
+static void idmap_set_range_protected(unsigned long virt_offset, size_t size)
+{
+ __idmap_set_range_prot(virt_offset, size, __pgprot(PTE_WBWA | PTE_USER));
+}
+
void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
phys_addr_t phys_start, phys_addr_t phys_end,
pgprot_t prot)
@@ -190,6 +234,7 @@ void *setup_mmu(phys_addr_t phys_end, void *unused)
}
mmu_enable(mmu_idmap);
+ idmap_end = phys_end;
return mmu_idmap;
}
@@ -278,3 +323,19 @@ void mmu_clear_user(pgd_t *pgtable, unsigned long vaddr)
flush_tlb_page(vaddr);
}
}
+
+void set_memory_encrypted(unsigned long va, size_t size)
+{
+ if (is_realm()) {
+ arm_set_memory_protected(__virt_to_phys(va), size);
+ idmap_set_range_protected(va, size);
+ }
+}
+
+void set_memory_decrypted(unsigned long va, size_t size)
+{
+ if (is_realm()) {
+ arm_set_memory_shared(__virt_to_phys(va), size);
+ idmap_set_range_shared(va, size);
+ }
+}
diff --git a/lib/arm64/asm/io.h b/lib/arm64/asm/io.h
index be19f471..3f71254d 100644
--- a/lib/arm64/asm/io.h
+++ b/lib/arm64/asm/io.h
@@ -89,6 +89,12 @@ static inline void *phys_to_virt(phys_addr_t x)
return (void *)__phys_to_virt(x);
}
+extern void set_memory_decrypted(unsigned long va, size_t size);
+#define set_memory_decrypted set_memory_decrypted
+
+extern void set_memory_encrypted(unsigned long va, size_t size);
+#define set_memory_encrypted set_memory_encrypted
+
#include <asm-generic/io.h>
#endif /* _ASMARM64_IO_H_ */
--
2.17.1