Message-Id: <20231121212016.1154303-5-mhklinux@outlook.com>
Date: Tue, 21 Nov 2023 13:20:12 -0800
From: mhkelley58@...il.com
To: tglx@...utronix.de, mingo@...hat.com, bp@...en8.de,
dave.hansen@...ux.intel.com, x86@...nel.org, hpa@...or.com,
kirill.shutemov@...ux.intel.com, kys@...rosoft.com,
haiyangz@...rosoft.com, wei.liu@...nel.org, decui@...rosoft.com,
luto@...nel.org, peterz@...radead.org, akpm@...ux-foundation.org,
urezki@...il.com, hch@...radead.org, lstoakes@...il.com,
thomas.lendacky@....com, ardb@...nel.org, jroedel@...e.de,
seanjc@...gle.com, rick.p.edgecombe@...el.com,
sathyanarayanan.kuppuswamy@...ux.intel.com,
linux-kernel@...r.kernel.org, linux-coco@...ts.linux.dev,
linux-hyperv@...r.kernel.org, linux-mm@...ck.org
Subject: [PATCH v2 4/8] x86/sev: Enable PVALIDATE for PFNs without a valid virtual address

From: Michael Kelley <mhklinux@...look.com>

For SEV-SNP, the PVALIDATE instruction requires a valid virtual
address, which it translates to the PFN that it operates on. Per
the spec, it translates the virtual address as if it were doing a
single-byte read.
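
For reference, PVALIDATE is invoked through the kernel's existing
pvalidate() helper in arch/x86/include/asm/sev.h, which wraps the
instruction roughly as follows (paraphrased from the upstream helper;
not part of this patch):

  /* RAX = vaddr, RCX = page size (0 = 4K, 1 = 2M), RDX = validate */
  static inline int pvalidate(unsigned long vaddr, bool rmp_psize,
                              bool validate)
  {
      bool no_rmpupdate;
      int rc;

      /* "pvalidate" mnemonic needs binutils 2.36+, so emit the opcode */
      asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFF\n\t"
                   CC_SET(c)
                   : CC_OUT(c) (no_rmpupdate), "=a" (rc)
                   : "a" (vaddr), "c" (rmp_psize), "d" (validate)
                   : "memory", "cc");

      /* Carry flag set: RMP entry was already in the requested state */
      if (no_rmpupdate)
          return PVALIDATE_RET_NO_RMPUPDATE;

      return rc;
  }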

While transitioning a page between encrypted and decrypted, the
direct map virtual address of the page may be temporarily marked
invalid (i.e., PRESENT is cleared in the PTE) to prevent interference
from load_unaligned_zeropad(). In that case, the PVALIDATE required
for the encrypted<->decrypted transition fails because the virtual
address is invalid.

Fix this by mapping the target PFN to a temporary virtual address
just before executing PVALIDATE, having PVALIDATE use that temporary
virtual address instead of the direct map virtual address, and
unmapping the temporary virtual address after PVALIDATE completes,
as sketched below.
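
The mapping relies on two existing vmalloc-layer primitives, with
roughly these signatures (vmap_pages_range() is assumed to be made
callable from this code by an earlier patch in this series):

  int vmap_pages_range(unsigned long addr, unsigned long end,
                       pgprot_t prot, struct page **pages,
                       unsigned int page_shift);
  void vunmap_range(unsigned long addr, unsigned long end);

A minimal sketch of the sequence, as implemented by the new
pvalidate_pfn() helper below ("temp_vaddr" names the pre-reserved
temporary address):

  struct page *page = pfn_to_page(pfn);

  /* Map the single PFN at the temp address, validate, then unmap */
  mapping_rc = vmap_pages_range(temp_vaddr, temp_vaddr + PAGE_SIZE,
                                PAGE_KERNEL, &page, PAGE_SHIFT);
  rc = pvalidate(temp_vaddr, size, validate);
  vunmap_range(temp_vaddr, temp_vaddr + PAGE_SIZE);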

The temporary virtual address must be aligned on a 2 MB boundary
to meet PVALIDATE's requirements for operating on 2 MB large pages,
though the temporary mapping itself need only be a 4K mapping. The
temporary virtual address must also be preceded by an invalid
(unmapped) 4K page so that it cannot be touched by
load_unaligned_zeropad(). The reservation arithmetic is shown below.
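
Reserving PTRS_PER_PMD + 1 pages (2 MB + 4K) of vmalloc space is
sufficient for both constraints. A sketch of the arithmetic, matching
snp_set_memory_shared()/snp_set_memory_private() in the diff below:

  struct vm_struct *area;
  unsigned long temp_vaddr;

  /*
   * area->addr is 4K-aligned, so the first 2 MB boundary at or above
   * area->addr + 4K is at most 2 MB past area->addr. The 4K page at
   * that boundary and the unmapped guard page just below it therefore
   * both lie within the 2 MB + 4K reservation.
   */
  area = get_vm_area(PAGE_SIZE * (PTRS_PER_PMD + 1), 0);
  temp_vaddr = ALIGN((unsigned long)(area->addr + PAGE_SIZE), PMD_SIZE);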

This mechanism is used only for pages transitioning between
encrypted and decrypted. When PVALIDATE is done for initial page
acceptance, no temporary virtual address is provided, and PVALIDATE
uses the direct map virtual address, as in the contrast below.
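
The two kinds of callers in the diff below reflect this split:

  /* Encrypted<->decrypted conversion: temp address supplied */
  pvalidate_pages(data, temp_vaddr);

  /* Initial page acceptance: direct map is valid, no temp address */
  pvalidate_pages(desc, 0);
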
Signed-off-by: Michael Kelley <mhklinux@...look.com>
---
 arch/x86/boot/compressed/sev.c |  2 +-
 arch/x86/kernel/sev-shared.c   | 57 +++++++++++++++++++++++++++-------
 arch/x86/kernel/sev.c          | 32 ++++++++++++-------
 3 files changed, 67 insertions(+), 24 deletions(-)

diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 454acd7a2daf..4d4a3fc0b725 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -224,7 +224,7 @@ static phys_addr_t __snp_accept_memory(struct snp_psc_desc *desc,
 	if (vmgexit_psc(boot_ghcb, desc))
 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
 
-	pvalidate_pages(desc);
+	pvalidate_pages(desc, 0);
 
 	return pa;
 }
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index ccb0915e84e1..fc45fdcf3892 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -1071,35 +1071,70 @@ static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
 	}
 }
 
-static void pvalidate_pages(struct snp_psc_desc *desc)
+#ifdef __BOOT_COMPRESSED
+static int pvalidate_pfn(unsigned long vaddr, unsigned int size,
+			 unsigned long pfn, bool validate, int *rc2)
+{
+	return 0;
+}
+#else
+static int pvalidate_pfn(unsigned long vaddr, unsigned int size,
+			 unsigned long pfn, bool validate, int *rc2)
+{
+	int rc;
+	struct page *page = pfn_to_page(pfn);
+
+	*rc2 = vmap_pages_range(vaddr, vaddr + PAGE_SIZE,
+				PAGE_KERNEL, &page, PAGE_SHIFT);
+	rc = pvalidate(vaddr, size, validate);
+	vunmap_range(vaddr, vaddr + PAGE_SIZE);
+
+	return rc;
+}
+#endif
+
+static void pvalidate_pages(struct snp_psc_desc *desc, unsigned long vaddr)
 {
 	struct psc_entry *e;
-	unsigned long vaddr;
+	unsigned long pfn;
 	unsigned int size;
 	unsigned int i;
 	bool validate;
-	int rc;
+	int rc, rc2 = 0;
 
 	for (i = 0; i <= desc->hdr.end_entry; i++) {
 		e = &desc->entries[i];
 
-		vaddr = (unsigned long)pfn_to_kaddr(e->gfn);
-		size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
+		size = e->pagesize;
 		validate = e->operation == SNP_PAGE_STATE_PRIVATE;
+		pfn = e->gfn;
 
-		rc = pvalidate(vaddr, size, validate);
-		if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
-			unsigned long vaddr_end = vaddr + PMD_SIZE;
+		if (vaddr) {
+			rc = pvalidate_pfn(vaddr, size, pfn, validate, &rc2);
+		} else {
+			rc = pvalidate((unsigned long)pfn_to_kaddr(pfn),
+				       size, validate);
+		}
+
+		if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
+			unsigned long last_pfn = pfn + PTRS_PER_PMD - 1;
 
-			for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) {
-				rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
+			for (; pfn <= last_pfn; pfn++) {
+				if (vaddr) {
+					rc = pvalidate_pfn(vaddr, RMP_PG_SIZE_4K,
+							   pfn, validate, &rc2);
+				} else {
+					rc = pvalidate((unsigned long)pfn_to_kaddr(pfn),
						       RMP_PG_SIZE_4K, validate);
+				}
 				if (rc)
 					break;
 			}
 		}
 
 		if (rc) {
-			WARN(1, "Failed to validate address 0x%lx ret %d", vaddr, rc);
+			WARN(1, "Failed to validate pfn 0x%lx ret %d ret2 %d",
+			     pfn, rc, rc2);
 			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
 		}
 	}
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 7eac92c07a58..08b2e2a0d67d 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -790,7 +790,7 @@ void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op
 }
 
 static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
-				       unsigned long vaddr_end, int op)
+				       unsigned long vaddr_end, int op, unsigned long temp_vaddr)
 {
 	struct ghcb_state state;
 	bool use_large_entry;
@@ -842,7 +842,7 @@ static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long
 
 	/* Page validation must be rescinded before changing to shared */
 	if (op == SNP_PAGE_STATE_SHARED)
-		pvalidate_pages(data);
+		pvalidate_pages(data, temp_vaddr);
 
 	local_irq_save(flags);
 
@@ -862,12 +862,13 @@ static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long
 
 	/* Page validation must be performed after changing to private */
 	if (op == SNP_PAGE_STATE_PRIVATE)
-		pvalidate_pages(data);
+		pvalidate_pages(data, temp_vaddr);
 
 	return vaddr;
 }
 
-static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
+static void set_pages_state(unsigned long vaddr, unsigned long npages,
+			    int op, unsigned long temp_vaddr)
 {
 	struct snp_psc_desc desc;
 	unsigned long vaddr_end;
@@ -880,23 +881,30 @@ static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
 	vaddr_end = vaddr + (npages << PAGE_SHIFT);
 
 	while (vaddr < vaddr_end)
-		vaddr = __set_pages_state(&desc, vaddr, vaddr_end, op);
+		vaddr = __set_pages_state(&desc, vaddr, vaddr_end,
+					  op, temp_vaddr);
 }
 
 void snp_set_memory_shared(unsigned long vaddr, unsigned long npages)
 {
-	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
-		return;
+	struct vm_struct *area;
+	unsigned long temp_vaddr;
 
-	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
+	area = get_vm_area(PAGE_SIZE * (PTRS_PER_PMD + 1), 0);
+	temp_vaddr = ALIGN((unsigned long)(area->addr + PAGE_SIZE), PMD_SIZE);
+	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED, temp_vaddr);
+	free_vm_area(area);
 }
 
 void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
 {
-	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
-		return;
+	struct vm_struct *area;
+	unsigned long temp_vaddr;
 
-	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
+	area = get_vm_area(PAGE_SIZE * (PTRS_PER_PMD + 1), 0);
+	temp_vaddr = ALIGN((unsigned long)(area->addr + PAGE_SIZE), PMD_SIZE);
+	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE, temp_vaddr);
+	free_vm_area(area);
 }
 
 void snp_accept_memory(phys_addr_t start, phys_addr_t end)
@@ -909,7 +917,7 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
 	vaddr = (unsigned long)__va(start);
 	npages = (end - start) >> PAGE_SHIFT;
 
-	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
+	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE, 0);
 }
 
 static int snp_set_vmsa(void *va, bool vmsa)
--
2.25.1