Message-ID: <20220223043528.2093214-1-brijesh.singh@amd.com>
Date: Tue, 22 Feb 2022 22:35:28 -0600
From: Brijesh Singh <brijesh.singh@....com>
To: <x86@...nel.org>, <linux-kernel@...r.kernel.org>,
<kvm@...r.kernel.org>, <linux-coco@...ts.linux.dev>
CC: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Joerg Roedel <jroedel@...e.de>,
Tom Lendacky <thomas.lendacky@....com>,
"H. Peter Anvin" <hpa@...or.com>,
Paolo Bonzini <pbonzini@...hat.com>,
"Sean Christopherson" <seanjc@...gle.com>,
Andy Lutomirski <luto@...nel.org>,
"Dave Hansen" <dave.hansen@...ux.intel.com>,
Peter Gonda <pgonda@...gle.com>,
"Peter Zijlstra" <peterz@...radead.org>,
David Rientjes <rientjes@...gle.com>,
Borislav Petkov <bp@...en8.de>,
Michael Roth <michael.roth@....com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Andi Kleen <ak@...ux.intel.com>,
Brijesh Singh <brijesh.singh@....com>
Subject: [PATCH] x86/mm/cpa: Generalize __set_memory_enc_pgtable()
The kernel provides infrastructure to set or clear the encryption mask
on pages for AMD SEV, but TDX requires a few tweaks:
- TDX and SEV have different requirements for cache and TLB flushing.
- TDX has its own routine to notify the VMM about page encryption status
  changes.
Modify __set_memory_enc_pgtable() to make it flexible enough to cover
both AMD SEV and Intel TDX. The AMD-specific behavior is isolated in
callbacks under x86_platform.cc. TDX will provide its own version of
the callbacks.
Signed-off-by: Brijesh Singh <brijesh.singh@....com>
---
Depends on Kirill's CC cleanup:
https://lore.kernel.org/all/20220222185740.26228-1-kirill.shutemov@linux.intel.com/
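For illustration only, not part of this patch: a hedged sketch of how a
TDX-side implementation might plug into the same hooks. The tdx_* names
and the flush/notify semantics below are assumptions made for the
sketch; the real TDX patches will define their own callbacks.

	#include <asm/x86_init.h>

	/* Hypothetical sketch -- not the real TDX implementation. */
	static void tdx_enc_status_change_prepare(unsigned long vaddr,
						  int npages, bool enc)
	{
		/* e.g. notify the VMM that the GPA range is about to change */
	}

	static void tdx_enc_status_change_finish(unsigned long vaddr,
						 int npages, bool enc)
	{
		/* e.g. finalize/accept the pages after the conversion */
	}

	static bool tdx_enc_tlb_flush_required(bool enc)
	{
		return true;	/* assumption made for the sketch */
	}

	static bool tdx_enc_cache_flush_required(void)
	{
		return false;	/* assumption: no cache flush needed */
	}

	static const struct x86_cc_runtime tdx_cc_runtime = {
		.enc_status_change_prepare = tdx_enc_status_change_prepare,
		.enc_status_change_finish  = tdx_enc_status_change_finish,
		.enc_tlb_flush_required    = tdx_enc_tlb_flush_required,
		.enc_cache_flush_required  = tdx_enc_cache_flush_required,
	};

	/* hypothetical init hook; installed once during TDX guest setup */
	void __init tdx_guest_init_sketch(void)
	{
		x86_platform.cc = &tdx_cc_runtime;
	}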
 arch/x86/include/asm/set_memory.h |  1 -
 arch/x86/include/asm/x86_init.h   | 21 +++++++++
 arch/x86/mm/mem_encrypt_amd.c     | 75 ++++++++++++++++++++++---------
 arch/x86/mm/pat/set_memory.c      | 20 +++++----
 4 files changed, 85 insertions(+), 32 deletions(-)
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index ff0f2d90338a..ce8dd215f5b3 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -84,7 +84,6 @@ int set_pages_rw(struct page *page, int numpages);
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
bool kernel_page_present(struct page *page);
-void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc);
extern int kernel_set_to_readonly;
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 22b7412c08f6..dce92e2cb9e1 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -141,6 +141,26 @@ struct x86_init_acpi {
void (*reduced_hw_early_init)(void);
};
+/**
+ * struct x86_cc_runtime - Functions used by misc guest incarnations like SEV, TDX, etc.
+ *
+ * @enc_status_change_prepare: Notify HV before the encryption status of a range
+ * is changed.
+ *
+ * @enc_status_change_finish: Notify HV after the encryption status of a range
+ * is changed.
+ *
+ * @enc_tlb_flush_required: Flush the TLB before changing the encryption status.
+ *
+ * @enc_cache_flush_required: Flush the caches before changing the encryption status.
+ */
+struct x86_cc_runtime {
+ void (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
+ void (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
+ bool (*enc_tlb_flush_required)(bool enc);
+ bool (*enc_cache_flush_required)(void);
+};
+
/**
* struct x86_init_ops - functions for platform specific setup
*
@@ -287,6 +307,7 @@ struct x86_platform_ops {
struct x86_legacy_features legacy;
void (*set_legacy_features)(void);
struct x86_hyper_runtime hyper;
+ const struct x86_cc_runtime *cc;
};
struct x86_apic_ops {
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 2b2d018ea345..22b86af5edf1 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -177,25 +177,6 @@ void __init sme_map_bootdata(char *real_mode_data)
__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}
-void __init sme_early_init(void)
-{
- unsigned int i;
-
- if (!sme_me_mask)
- return;
-
- early_pmd_flags = __sme_set(early_pmd_flags);
-
- __supported_pte_mask = __sme_set(__supported_pte_mask);
-
- /* Update the protection map with memory encryption mask */
- for (i = 0; i < ARRAY_SIZE(protection_map); i++)
- protection_map[i] = pgprot_encrypted(protection_map[i]);
-
- if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
- swiotlb_force = SWIOTLB_FORCE;
-}
-
void __init sev_setup_arch(void)
{
phys_addr_t total_mem = memblock_phys_mem_size();
@@ -256,7 +237,17 @@ static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
return pfn;
}
-void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
+static bool amd_enc_tlb_flush_required(bool enc)
+{
+ return true;
+}
+
+static bool amd_enc_cache_flush_required(void)
+{
+ return !this_cpu_has(X86_FEATURE_SME_COHERENT);
+}
+
+static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
{
#ifdef CONFIG_PARAVIRT
unsigned long sz = npages << PAGE_SHIFT;
@@ -287,6 +278,18 @@ void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
#endif
}
+static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
+{
+}
+
+static void amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
+{
+ if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+ return;
+
+ enc_dec_hypercall(vaddr, npages, enc);
+}
+
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
pgprot_t old_prot, new_prot;
@@ -392,7 +395,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
ret = 0;
- notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
+ early_set_mem_enc_dec_hypercall(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
out:
__flush_tlb_all();
return ret;
@@ -410,7 +413,35 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
{
- notify_range_enc_status_changed(vaddr, npages, enc);
+ enc_dec_hypercall(vaddr, npages, enc);
+}
+
+static const struct x86_cc_runtime amd_cc_runtime = {
+ .enc_status_change_prepare = amd_enc_status_change_prepare,
+ .enc_status_change_finish = amd_enc_status_change_finish,
+ .enc_tlb_flush_required = amd_enc_tlb_flush_required,
+ .enc_cache_flush_required = amd_enc_cache_flush_required,
+};
+
+void __init sme_early_init(void)
+{
+ unsigned int i;
+
+ if (!sme_me_mask)
+ return;
+
+ early_pmd_flags = __sme_set(early_pmd_flags);
+
+ __supported_pte_mask = __sme_set(__supported_pte_mask);
+
+ /* Update the protection map with memory encryption mask */
+ for (i = 0; i < ARRAY_SIZE(protection_map); i++)
+ protection_map[i] = pgprot_encrypted(protection_map[i]);
+
+ if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+ swiotlb_force = SWIOTLB_FORCE;
+
+ x86_platform.cc = &amd_cc_runtime;
}
void __init mem_encrypt_free_decrypted_mem(void)
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index af77dbfd143c..4de2a7509039 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1997,6 +1997,8 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
addr &= PAGE_MASK;
+ BUG_ON(!x86_platform.cc);
+
memset(&cpa, 0, sizeof(cpa));
cpa.vaddr = &addr;
cpa.numpages = numpages;
@@ -2008,10 +2010,12 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
kmap_flush_unused();
vm_unmap_aliases();
- /*
- * Before changing the encryption attribute, we need to flush caches.
- */
- cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
+ /* Flush the caches as needed before changing the encryption attribute. */
+ if (x86_platform.cc->enc_tlb_flush_required(enc))
+ cpa_flush(&cpa, x86_platform.cc->enc_cache_flush_required());
+
+ /* Notify hypervisor that we are about to set/clr encryption attribute. */
+ x86_platform.cc->enc_status_change_prepare(addr, numpages, enc);
ret = __change_page_attr_set_clr(&cpa, 1);
@@ -2024,11 +2028,9 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
*/
cpa_flush(&cpa, 0);
- /*
- * Notify hypervisor that a given memory range is mapped encrypted
- * or decrypted.
- */
- notify_range_enc_status_changed(addr, numpages, enc);
+ /* Notify hypervisor that we have successfully set/clr encryption attribute. */
+ if (!ret)
+ x86_platform.cc->enc_status_change_finish(addr, numpages, enc);
return ret;
}
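For context, again not part of the patch: the typical consumer of this
path is code that shares memory with the hypervisor. A minimal sketch,
assuming a guest with memory encryption active; alloc_shared_buffer()
is a made-up helper, and the set_memory_decrypted() call funnels
through __set_memory_enc_pgtable() and thus through the new
x86_platform.cc hooks:

	#include <linux/gfp.h>
	#include <linux/set_memory.h>

	static void *alloc_shared_buffer(int npages)
	{
		void *buf = alloc_pages_exact(npages << PAGE_SHIFT, GFP_KERNEL);

		if (!buf)
			return NULL;

		/*
		 * Clear the encryption attribute; on success the hypervisor
		 * is notified via the enc_status_change_finish() callback.
		 */
		if (set_memory_decrypted((unsigned long)buf, npages)) {
			free_pages_exact(buf, npages << PAGE_SHIFT);
			return NULL;
		}

		return buf;
	}

On SEV this flushes caches/TLBs per the amd_enc_*_required() callbacks
and issues the page encryption status hypercall from
amd_enc_status_change_finish(); a TDX guest only has to install its own
x86_cc_runtime instead of growing vendor-specific checks in
set_memory.c.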
--
2.25.1