Message-ID: <20231230161954.569267-8-michael.roth@amd.com>
Date: Sat, 30 Dec 2023 10:19:35 -0600
From: Michael Roth <michael.roth@....com>
To: <x86@...nel.org>
CC: <kvm@...r.kernel.org>, <linux-coco@...ts.linux.dev>, <linux-mm@...ck.org>,
<linux-crypto@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<tglx@...utronix.de>, <mingo@...hat.com>, <jroedel@...e.de>,
<thomas.lendacky@....com>, <hpa@...or.com>, <ardb@...nel.org>,
<pbonzini@...hat.com>, <seanjc@...gle.com>, <vkuznets@...hat.com>,
<jmattson@...gle.com>, <luto@...nel.org>, <dave.hansen@...ux.intel.com>,
<slp@...hat.com>, <pgonda@...gle.com>, <peterz@...radead.org>,
<srinivas.pandruvada@...ux.intel.com>, <rientjes@...gle.com>,
<tobin@....com>, <bp@...en8.de>, <vbabka@...e.cz>, <kirill@...temov.name>,
<ak@...ux.intel.com>, <tony.luck@...el.com>,
<sathyanarayanan.kuppuswamy@...ux.intel.com>, <alpergun@...gle.com>,
<jarkko@...nel.org>, <ashish.kalra@....com>, <nikunj.dadhania@....com>,
<pankaj.gupta@....com>, <liam.merwick@...cle.com>, <zhi.a.wang@...el.com>,
Brijesh Singh <brijesh.singh@....com>
Subject: [PATCH v1 07/26] x86/fault: Add helper for dumping RMP entries
From: Brijesh Singh <brijesh.singh@....com>
Add a helper to dump the raw RMP entry for the PFN backing a given host
virtual address. This information will be useful for debugging things like
page faults due to RMP access violations and RMPUPDATE failures.
Signed-off-by: Brijesh Singh <brijesh.singh@....com>
Signed-off-by: Ashish Kalra <ashish.kalra@....com>
[mdr: move helper to standalone patch, rework dump logic to reduce
verbosity]
Signed-off-by: Michael Roth <michael.roth@....com>
---
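For reviewers: the helper added below is intended to be wired into the page
fault reporting path later in this series. A minimal sketch of such a call
site (hypothetical here, not part of this patch; the actual hookup, and the
X86_PF_RMP error code bit it tests, are introduced in separate patches):

    /* Hypothetical caller: dump RMP state when the #PF error code
     * reports an RMP violation (bit 31 per the AMD APM). */
    static void dump_rmp_on_fault(unsigned long error_code,
                                  unsigned long address)
    {
            if (error_code & X86_PF_RMP)
                    snp_dump_hva_rmpentry(address);
    }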
arch/x86/include/asm/sev.h | 2 +
arch/x86/virt/svm/sev.c | 77 ++++++++++++++++++++++++++++++++++++++
2 files changed, 79 insertions(+)
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 01ce61b283a3..2c53e3de0b71 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -247,9 +247,11 @@ static inline u64 sev_get_status(void) { return 0; }
#ifdef CONFIG_KVM_AMD_SEV
bool snp_probe_rmptable_info(void);
int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level);
+void snp_dump_hva_rmpentry(unsigned long address);
#else
static inline bool snp_probe_rmptable_info(void) { return false; }
static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
+static inline void snp_dump_hva_rmpentry(unsigned long address) {}
#endif
#endif
diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
index 49fdfbf4e518..7c9ced8911e9 100644
--- a/arch/x86/virt/svm/sev.c
+++ b/arch/x86/virt/svm/sev.c
@@ -266,3 +266,80 @@ int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level)
return 0;
}
EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
+
+/*
+ * Dump the raw RMP entry for a particular PFN. These bits are documented in the
+ * PPR for a particular CPU model and provide useful information about how a
+ * particular PFN is being utilized by the kernel/firmware at the time certain
+ * unexpected events occur, such as RMP faults.
+ */
+static void dump_rmpentry(u64 pfn)
+{
+ u64 pfn_current, pfn_end;
+ struct rmpentry *e;
+ u64 *e_data;
+ int level;
+
+ e = __snp_lookup_rmpentry(pfn, &level);
+ if (IS_ERR(e)) {
+ pr_info("Failed to read RMP entry for PFN 0x%llx, error %ld\n",
+ pfn, PTR_ERR(e));
+ return;
+ }
+
+ e_data = (u64 *)e;
+ if (e->assigned) {
+ pr_info("RMP entry for PFN 0x%llx: [high=0x%016llx low=0x%016llx]\n",
+ pfn, e_data[1], e_data[0]);
+ return;
+ }
+
+ /*
+ * If the RMP entry for a particular PFN is not in an assigned state,
+ * then it is sometimes useful to get an idea of whether or not any RMP
+ * entries for other PFNs within the same 2MB region are assigned, since
+ * those too can affect the ability to access a particular PFN in
+ * certain situations, such as when the PFN is being accessed via a 2MB
+ * mapping in the host page table.
+ */
+ pfn_current = ALIGN_DOWN(pfn, PTRS_PER_PMD);
+ pfn_end = pfn_current + PTRS_PER_PMD;
+
+ while (pfn_current < pfn_end) {
+ e = __snp_lookup_rmpentry(pfn_current, &level);
+ if (IS_ERR(e)) {
+ pfn_current++;
+ continue;
+ }
+
+ e_data = (u64 *)e;
+ if (e_data[0] || e_data[1]) {
+ pr_info("No assigned RMP entry for PFN 0x%llx, but the 2MB region contains populated RMP entries, e.g.: PFN 0x%llx: [high=0x%016llx low=0x%016llx]\n",
+ pfn, pfn_current, e_data[1], e_data[0]);
+ return;
+ }
+ pfn_current++;
+ }
+
+ pr_info("No populated RMP entries in the 2MB region containing PFN 0x%llx\n",
+ pfn);
+}
+
+void snp_dump_hva_rmpentry(unsigned long hva)
+{
+ unsigned int level;
+ pgd_t *pgd;
+ pte_t *pte;
+
+ pgd = __va(read_cr3_pa());
+ pgd += pgd_index(hva);
+ pte = lookup_address_in_pgd(pgd, hva, &level);
+
+ if (!pte) {
+ pr_info("Can't dump RMP entry for HVA %lx: no PTE/PFN found\n", hva);
+ return;
+ }
+
+ dump_rmpentry(pte_pfn(*pte));
+}
+EXPORT_SYMBOL_GPL(snp_dump_hva_rmpentry);
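
For reference, the 2MB-region scan above uses PTRS_PER_PMD (512 with 4K
pages on x86-64) to round a PFN down to the start of the 2MB region that
contains it, which is why ALIGN_DOWN() rather than ALIGN() is needed. A
standalone sketch of that arithmetic (plain C, illustrative values only):

    #include <stdio.h>
    #include <inttypes.h>

    #define PTRS_PER_PMD 512 /* 4K PFNs per 2MB region on x86-64 */

    /* Power-of-two round-down, mirroring the kernel's ALIGN_DOWN(). */
    static uint64_t align_down(uint64_t x, uint64_t a)
    {
            return x & ~(a - 1);
    }

    int main(void)
    {
            uint64_t pfn = 0x12345;
            uint64_t start = align_down(pfn, PTRS_PER_PMD);

            /* Prints: PFN 0x12345 -> 2MB region PFNs [0x12200, 0x12400) */
            printf("PFN 0x%" PRIx64 " -> 2MB region PFNs [0x%" PRIx64
                   ", 0x%" PRIx64 ")\n",
                   pfn, start, start + PTRS_PER_PMD);
            return 0;
    }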
--
2.25.1