Message-ID: <170663196942.398.1001821466269981082.tip-bot2@tip-bot2>
Date: Tue, 30 Jan 2024 16:26:09 -0000
From: "tip-bot2 for Brijesh Singh" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Brijesh Singh <brijesh.singh@....com>, Ashish Kalra <ashish.kalra@....com>,
Michael Roth <michael.roth@....com>, "Borislav Petkov (AMD)" <bp@...en8.de>,
x86@...nel.org, linux-kernel@...r.kernel.org
Subject: [tip: x86/sev] x86/sev: Add RMP entry lookup helpers

The following commit has been merged into the x86/sev branch of tip:

Commit-ID:     94b36bc244bb134ec616dd3f2d37343cd8c1be54
Gitweb:        https://git.kernel.org/tip/94b36bc244bb134ec616dd3f2d37343cd8c1be54
Author:        Brijesh Singh <brijesh.singh@....com>
AuthorDate:    Thu, 25 Jan 2024 22:11:06 -06:00
Committer:     Borislav Petkov (AMD) <bp@...en8.de>
CommitterDate: Mon, 29 Jan 2024 17:25:55 +01:00

x86/sev: Add RMP entry lookup helpers

Add a helper that can be used to access information contained in the RMP
entry corresponding to a particular PFN. This will be needed to decide
how to set up NPT mappings in response to guest page faults, and to
handle things like cleaning up pages and setting them back to the
default hypervisor-owned state when they are no longer being used for
private data.

[ mdr: separate 'assigned' indicator from return code, and simplify
  function signatures for various helpers. ]

Signed-off-by: Brijesh Singh <brijesh.singh@....com>
Co-developed-by: Ashish Kalra <ashish.kalra@....com>
Signed-off-by: Ashish Kalra <ashish.kalra@....com>
Signed-off-by: Michael Roth <michael.roth@....com>
Signed-off-by: Borislav Petkov (AMD) <bp@...en8.de>
Link: https://lore.kernel.org/r/20240126041126.1927228-7-michael.roth@amd.com

---
 arch/x86/include/asm/sev.h |  3 +++
 arch/x86/virt/svm/sev.c    | 49 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 52 insertions(+)

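Before the patch itself, a brief usage sketch (editor's illustration, not
part of this series): how a hypervisor-side caller might consume the new
helper. The function name rmp_check_assigned() is hypothetical; only
snp_lookup_rmpentry(), PG_LEVEL_4K/PG_LEVEL_2M and the error codes come
from the patch below.

/*
 * Editor's illustration, not part of this series: verify that a PFN is
 * guest-assigned in the RMP before acting on it. rmp_check_assigned()
 * is a made-up name for this sketch.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/sev.h>

static int rmp_check_assigned(u64 pfn)
{
        bool assigned;
        int level, ret;

        ret = snp_lookup_rmpentry(pfn, &assigned, &level);
        if (ret)
                return ret;     /* -ENODEV without SNP, -EFAULT for an out-of-range PFN */

        /* Hypervisor-owned pages are not assigned to a guest. */
        if (!assigned)
                return -EINVAL;

        /* level is PG_LEVEL_4K or PG_LEVEL_2M, via RMP_TO_PG_LEVEL(). */
        return level;
}
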
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 1f59d8b..01ce61b 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -90,6 +90,7 @@ extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
 /* RMP page size */
 #define RMP_PG_SIZE_4K          0
 #define RMP_PG_SIZE_2M          1
+#define RMP_TO_PG_LEVEL(level)  (((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
 
 #define RMPADJUST_VMSA_PAGE_BIT BIT(16)
 
@@ -245,8 +246,10 @@ static inline u64 sev_get_status(void) { return 0; }
 
 #ifdef CONFIG_KVM_AMD_SEV
 bool snp_probe_rmptable_info(void);
+int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level);
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
+static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
 #endif
 
 #endif
diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
index 575a9ff..7669b2f 100644
--- a/arch/x86/virt/svm/sev.c
+++ b/arch/x86/virt/svm/sev.c
@@ -53,6 +53,9 @@ struct rmpentry {
  */
 #define RMPTABLE_CPU_BOOKKEEPING_SZ     0x4000
 
+/* Mask to apply to a PFN to get the first PFN of a 2MB page */
+#define PFN_PMD_MASK    GENMASK_ULL(63, PMD_SHIFT - PAGE_SHIFT)
+
 static u64 probed_rmp_base, probed_rmp_size;
 static struct rmpentry *rmptable __ro_after_init;
 static u64 rmptable_max_pfn __ro_after_init;
@@ -214,3 +217,49 @@ nosnp:
  * This must be called after the IOMMU has been initialized.
  */
 device_initcall(snp_rmptable_init);
+
+static struct rmpentry *get_rmpentry(u64 pfn)
+{
+        if (WARN_ON_ONCE(pfn > rmptable_max_pfn))
+                return ERR_PTR(-EFAULT);
+
+        return &rmptable[pfn];
+}
+
+static struct rmpentry *__snp_lookup_rmpentry(u64 pfn, int *level)
+{
+        struct rmpentry *large_entry, *entry;
+
+        if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+                return ERR_PTR(-ENODEV);
+
+        entry = get_rmpentry(pfn);
+        if (IS_ERR(entry))
+                return entry;
+
+        /*
+         * Find the authoritative RMP entry for a PFN. This can be either a 4K
+         * RMP entry or a special large RMP entry that is authoritative for a
+         * whole 2M area.
+         */
+        large_entry = get_rmpentry(pfn & PFN_PMD_MASK);
+        if (IS_ERR(large_entry))
+                return large_entry;
+
+        *level = RMP_TO_PG_LEVEL(large_entry->pagesize);
+
+        return entry;
+}
+
+int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level)
+{
+        struct rmpentry *e;
+
+        e = __snp_lookup_rmpentry(pfn, level);
+        if (IS_ERR(e))
+                return PTR_ERR(e);
+
+        *assigned = !!e->assigned;
+        return 0;
+}
+EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
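
A closing note on the mask arithmetic above: with 4K base pages
(PAGE_SHIFT == 12) and 2M PMDs (PMD_SHIFT == 21), PFN_PMD_MASK is
GENMASK_ULL(63, 9), i.e. it clears the low nine bits of a PFN and rounds
it down to the first 4K page of its 2MB region, which is how
__snp_lookup_rmpentry() finds the large RMP entry covering a PFN. A
standalone sketch of just the arithmetic (editor's illustration, not
kernel code; assumes the usual x86-64 shift values):

/*
 * Editor's illustration of the PFN_PMD_MASK arithmetic as a standalone
 * userspace program. GENMASK_ULL(63, 9) is open-coded as ~0ULL << 9.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12      /* 4K base pages */
#define PMD_SHIFT       21      /* 2M PMD pages  */
#define PFN_PMD_MASK    (~0ULL << (PMD_SHIFT - PAGE_SHIFT))

int main(void)
{
        uint64_t pfn = 0x12345;

        /*
         * Clears the low 9 bits: 0x12345 -> 0x12200, the first of the
         * 512 4K PFNs making up that 2MB region.
         */
        printf("%#llx -> %#llx\n",
               (unsigned long long)pfn,
               (unsigned long long)(pfn & PFN_PMD_MASK));
        return 0;
}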