[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20210324170436.31843-3-brijesh.singh@amd.com>
Date: Wed, 24 Mar 2021 12:04:08 -0500
From: Brijesh Singh <brijesh.singh@....com>
To: linux-kernel@...r.kernel.org, x86@...nel.org, kvm@...r.kernel.org,
linux-crypto@...r.kernel.org
Cc: ak@...ux.intel.com, herbert@...dor.apana.org.au,
Brijesh Singh <brijesh.singh@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Joerg Roedel <jroedel@...e.de>,
"H. Peter Anvin" <hpa@...or.com>, Tony Luck <tony.luck@...el.com>,
Dave Hansen <dave.hansen@...el.com>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>,
Paolo Bonzini <pbonzini@...hat.com>,
Tom Lendacky <thomas.lendacky@....com>,
David Rientjes <rientjes@...gle.com>,
Sean Christopherson <seanjc@...gle.com>
Subject: [RFC Part2 PATCH 02/30] x86/sev-snp: add RMP entry lookup helpers
lookup_page_in_rmptable() can be used by the host to read the RMP
entry for a given page. The RMP entry format is documented in PPR
section 2.1.5.2.
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Joerg Roedel <jroedel@...e.de>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: Tony Luck <tony.luck@...el.com>
Cc: Dave Hansen <dave.hansen@...el.com>
Cc: "Peter Zijlstra (Intel)" <peterz@...radead.org>
Cc: Paolo Bonzini <pbonzini@...hat.com>
Cc: Tom Lendacky <thomas.lendacky@....com>
Cc: David Rientjes <rientjes@...gle.com>
Cc: Sean Christopherson <seanjc@...gle.com>
Cc: x86@...nel.org
Cc: kvm@...r.kernel.org
Signed-off-by: Brijesh Singh <brijesh.singh@....com>
---
arch/x86/include/asm/sev-snp.h | 31 +++++++++++++++++++++++++++++++
arch/x86/mm/mem_encrypt.c | 32 ++++++++++++++++++++++++++++++++
2 files changed, 63 insertions(+)
diff --git a/arch/x86/include/asm/sev-snp.h b/arch/x86/include/asm/sev-snp.h
index f7280d5c6158..2aa14b38c5ed 100644
--- a/arch/x86/include/asm/sev-snp.h
+++ b/arch/x86/include/asm/sev-snp.h
@@ -67,6 +67,35 @@ struct __packed snp_page_state_change {
#define X86_RMP_PG_LEVEL(level) (((level) == PG_LEVEL_4K) ? RMP_PG_SIZE_4K : RMP_PG_SIZE_2M)
#define RMP_X86_PG_LEVEL(level) (((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
+/* RMP table entry format (PPR section 2.1.5.2) */
+struct __packed rmpentry {
+ union {
+ struct {
+ uint64_t assigned:1; /* page is assigned (owned); see rmpentry_assigned() */
+ uint64_t pagesize:1; /* RMP_PG_SIZE_* value; mapped to PG_LEVEL_* via RMP_X86_PG_LEVEL() */
+ uint64_t immutable:1; /* per PPR 2.1.5.2 — see rmpentry_immutable() */
+ uint64_t rsvd1:9;
+ uint64_t gpa:39; /* guest physical address bits recorded for the page (PPR 2.1.5.2) */
+ uint64_t asid:10; /* ASID the page is assigned to — see rmpentry_asid() */
+ uint64_t vmsa:1; /* page is a VMSA page — see rmpentry_vmsa() */
+ uint64_t validated:1; /* page has been validated — see rmpentry_validated() */
+ uint64_t rsvd2:1;
+ } info;
+ uint64_t low; /* raw view of the first 8 bytes of the entry */
+ };
+ uint64_t high; /* second 8 bytes; not interpreted by these helpers */
+};
+
+typedef struct rmpentry rmpentry_t; /* shorthand used by lookup_page_in_rmptable() */
+
+#define rmpentry_assigned(x) ((x)->info.assigned)
+#define rmpentry_pagesize(x) (RMP_X86_PG_LEVEL((x)->info.pagesize)) /* returns PG_LEVEL_4K or PG_LEVEL_2M */
+#define rmpentry_vmsa(x) ((x)->info.vmsa)
+#define rmpentry_asid(x) ((x)->info.asid)
+#define rmpentry_validated(x) ((x)->info.validated)
+#define rmpentry_gpa(x) ((unsigned long)(x)->info.gpa)
+#define rmpentry_immutable(x) ((x)->info.immutable)
+
#ifdef CONFIG_AMD_MEM_ENCRYPT
#include <linux/jump_label.h>
@@ -94,6 +123,7 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
unsigned int npages);
int snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
int snp_set_memory_private(unsigned long vaddr, unsigned int npages);
+rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level);
extern struct static_key_false snp_enable_key;
static inline bool snp_key_active(void)
@@ -124,6 +154,7 @@ early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned i
static inline int snp_set_memory_shared(unsigned long vaddr, unsigned int npages) { return 0; }
static inline int snp_set_memory_private(unsigned long vaddr, unsigned int npages) { return 0; }
static inline bool snp_key_active(void) { return false; }
+static inline rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level) { return NULL; } /* SNP disabled: no RMP table; was misspelled "rpmentry_t", which broke !CONFIG_AMD_MEM_ENCRYPT builds */
#endif /* CONFIG_AMD_MEM_ENCRYPT */
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 39461b9cb34e..06394b6d56b2 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -34,6 +34,8 @@
#include "mm_internal.h"
+#define rmptable_page_offset(x) (0x4000 + (((unsigned long) x) >> 8))
+
/*
* Since SME related variables are set early in the boot process they must
* reside in the .data section so as not to be zeroed out when the .bss
@@ -612,3 +614,33 @@ static int __init mem_encrypt_snp_init(void)
* SEV-SNP must be enabled across all CPUs, so make the initialization as a late initcall.
*/
late_initcall(mem_encrypt_snp_init);
+
+rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level)
+{
+ unsigned long phys = page_to_pfn(page) << PAGE_SHIFT; /* struct page -> physical address */
+ rmpentry_t *entry, *large_entry;
+ unsigned long vaddr;
+
+ if (!static_branch_unlikely(&snp_enable_key)) /* no RMP table unless SNP is enabled */
+ return NULL;
+
+ vaddr = rmptable_start + rmptable_page_offset(phys);
+ if (WARN_ON(vaddr > rmptable_end)) /* NOTE(review): '>' admits vaddr == rmptable_end; confirm the end bound leaves room for a full entry */
+ return NULL;
+
+ entry = (rmpentry_t *)vaddr;
+
+ /*
+ * Check if this page is covered by the large RMP entry. This is needed to get
+ * the page level used in the RMP entry.
+ *
+ * e.g. if the page is covered by the large RMP entry then page size is set in the
+ * base RMP entry.
+ */
+ vaddr = rmptable_start + rmptable_page_offset(phys & PMD_MASK); /* entry for the 2M-aligned base of this range */
+ large_entry = (rmpentry_t *)vaddr;
+ *level = rmpentry_pagesize(large_entry); /* PG_LEVEL_4K or PG_LEVEL_2M, read from the base entry */
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(lookup_page_in_rmptable); /* used by KVM (module) for SNP guest page tracking */
--
2.17.1
Powered by blists - more mailing lists