Message-ID: <20240126041126.1927228-7-michael.roth@amd.com>
Date: Thu, 25 Jan 2024 22:11:06 -0600
From: Michael Roth <michael.roth@....com>
To: <x86@...nel.org>
CC: <kvm@...r.kernel.org>, <linux-coco@...ts.linux.dev>, <linux-mm@...ck.org>,
	<linux-crypto@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
	<tglx@...utronix.de>, <mingo@...hat.com>, <jroedel@...e.de>,
	<thomas.lendacky@....com>, <hpa@...or.com>, <ardb@...nel.org>,
	<pbonzini@...hat.com>, <seanjc@...gle.com>, <vkuznets@...hat.com>,
	<jmattson@...gle.com>, <luto@...nel.org>, <dave.hansen@...ux.intel.com>,
	<slp@...hat.com>, <pgonda@...gle.com>, <peterz@...radead.org>,
	<srinivas.pandruvada@...ux.intel.com>, <rientjes@...gle.com>,
	<tobin@....com>, <bp@...en8.de>, <vbabka@...e.cz>, <kirill@...temov.name>,
	<ak@...ux.intel.com>, <tony.luck@...el.com>,
	<sathyanarayanan.kuppuswamy@...ux.intel.com>, <alpergun@...gle.com>,
	<jarkko@...nel.org>, <ashish.kalra@....com>, <nikunj.dadhania@....com>,
	<pankaj.gupta@....com>, <liam.merwick@...cle.com>, Brijesh Singh
	<brijesh.singh@....com>
Subject: [PATCH v2 06/25] x86/sev: Add RMP entry lookup helpers

From: Brijesh Singh <brijesh.singh@....com>

Add a helper for accessing the information contained in the RMP entry
corresponding to a particular PFN. This will be needed to decide how to
set up NPT mappings in response to guest page faults, and to handle
tasks such as cleaning up pages and returning them to the default
hypervisor-owned state once they are no longer being used for private
data.
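
For illustration only (not part of this patch), a hypervisor-side
caller might consult the helper roughly as follows; the surrounding
fault-handling function is hypothetical:

	/*
	 * Hypothetical caller sketch: query the RMP state of a guest PFN
	 * before deciding how to map it. snp_lookup_rmpentry() is added by
	 * this patch and PG_LEVEL_2M comes from existing x86 headers;
	 * handle_guest_pfn() is made up for illustration.
	 */
	static int handle_guest_pfn(u64 pfn)
	{
		bool assigned;
		int level, ret;

		ret = snp_lookup_rmpentry(pfn, &assigned, &level);
		if (ret)
			return ret;

		if (assigned && level == PG_LEVEL_2M) {
			/* The whole 2MB region containing pfn is guest-private. */
		}

		return 0;
	}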

Signed-off-by: Brijesh Singh <brijesh.singh@....com>
Co-developed-by: Ashish Kalra <ashish.kalra@....com>
Signed-off-by: Ashish Kalra <ashish.kalra@....com>
[mdr: separate 'assigned' indicator from return code, and simplify
      function signatures for various helpers]
Signed-off-by: Michael Roth <michael.roth@....com>
---
 arch/x86/include/asm/sev.h |  3 +++
 arch/x86/virt/svm/sev.c    | 49 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 52 insertions(+)

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 1f59d8ba9776..01ce61b283a3 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -90,6 +90,7 @@ extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
 /* RMP page size */
 #define RMP_PG_SIZE_4K			0
 #define RMP_PG_SIZE_2M			1
+#define RMP_TO_PG_LEVEL(level)		(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
 
 #define RMPADJUST_VMSA_PAGE_BIT		BIT(16)
 
@@ -245,8 +246,10 @@ static inline u64 sev_get_status(void) { return 0; }
 
 #ifdef CONFIG_KVM_AMD_SEV
 bool snp_probe_rmptable_info(void);
+int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level);
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
+static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
 #endif
 
 #endif
diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
index 575a9ff046cb..7669b2ff0ec7 100644
--- a/arch/x86/virt/svm/sev.c
+++ b/arch/x86/virt/svm/sev.c
@@ -53,6 +53,9 @@ struct rmpentry {
  */
 #define RMPTABLE_CPU_BOOKKEEPING_SZ	0x4000
 
+/* Mask to apply to a PFN to get the first PFN of a 2MB page */
+#define PFN_PMD_MASK	GENMASK_ULL(63, PMD_SHIFT - PAGE_SHIFT)
+
 static u64 probed_rmp_base, probed_rmp_size;
 static struct rmpentry *rmptable __ro_after_init;
 static u64 rmptable_max_pfn __ro_after_init;
@@ -214,3 +217,49 @@ static int __init snp_rmptable_init(void)
  * This must be called after the IOMMU has been initialized.
  */
 device_initcall(snp_rmptable_init);
+
+static struct rmpentry *get_rmpentry(u64 pfn)
+{
+	if (WARN_ON_ONCE(pfn > rmptable_max_pfn))
+		return ERR_PTR(-EFAULT);
+
+	return &rmptable[pfn];
+}
+
+static struct rmpentry *__snp_lookup_rmpentry(u64 pfn, int *level)
+{
+	struct rmpentry *large_entry, *entry;
+
+	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+		return ERR_PTR(-ENODEV);
+
+	entry = get_rmpentry(pfn);
+	if (IS_ERR(entry))
+		return entry;
+
+	/*
+	 * Find the authoritative RMP entry for a PFN. This can be either a 4K
+	 * RMP entry or a special large RMP entry that is authoritative for a
+	 * whole 2M area.
+	 */
+	large_entry = get_rmpentry(pfn & PFN_PMD_MASK);
+	if (IS_ERR(large_entry))
+		return large_entry;
+
+	*level = RMP_TO_PG_LEVEL(large_entry->pagesize);
+
+	return entry;
+}
+
+int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level)
+{
+	struct rmpentry *e;
+
+	e = __snp_lookup_rmpentry(pfn, level);
+	if (IS_ERR(e))
+		return PTR_ERR(e);
+
+	*assigned = !!e->assigned;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
-- 
2.25.1

