Message-Id: <20210602141057.27107-35-brijesh.singh@amd.com>
Date: Wed, 2 Jun 2021 09:10:54 -0500
From: Brijesh Singh <brijesh.singh@....com>
To: x86@...nel.org, linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
linux-coco@...ts.linux.dev, linux-mm@...ck.org,
linux-crypto@...r.kernel.org
Cc: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Joerg Roedel <jroedel@...e.de>,
Tom Lendacky <thomas.lendacky@....com>,
"H. Peter Anvin" <hpa@...or.com>, Ard Biesheuvel <ardb@...nel.org>,
Paolo Bonzini <pbonzini@...hat.com>,
Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Andy Lutomirski <luto@...nel.org>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Sergio Lopez <slp@...hat.com>, Peter Gonda <pgonda@...gle.com>,
Peter Zijlstra <peterz@...radead.org>,
Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>,
David Rientjes <rientjes@...gle.com>, tony.luck@...el.com,
npmccallum@...hat.com, Borislav Petkov <bp@...e.de>,
Brijesh Singh <brijesh.singh@....com>
Subject: [PATCH Part2 RFC v3 34/37] KVM: SVM: Add support to handle the RMP nested page fault

Follow the recommendations from APM2 sections 15.36.10 and 15.36.11 to
resolve an RMP violation encountered during the nested page table (NPT)
walk.

Signed-off-by: Brijesh Singh <brijesh.singh@....com>
---
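Note on the error code bits consulted below: this is a minimal sketch of
the relevant #NPF error code definitions, assuming the names introduced
earlier in this series, with bit positions as documented in APM2 for
RMP-related nested page faults:

  /* #NPF error code bits used by the RMP fault handler (sketch). */
  #define PFERR_GUEST_RMP_MASK    BIT_ULL(31) /* fault was due to an RMP check */
  #define PFERR_GUEST_ENC_MASK    BIT_ULL(34) /* guest access had the C-bit set */
  #define PFERR_GUEST_SIZEM_MASK  BIT_ULL(35) /* NPT/RMP page size mismatch */

PFERR_GUEST_ENC_MASK and PFERR_GUEST_SIZEM_MASK sit above bit 31, which
is why kvm_mmu_page_fault() tests them on the full 64-bit error code
before the legacy path truncates it with lower_32_bits().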
arch/x86/include/asm/kvm_host.h | 3 ++
arch/x86/kvm/mmu/mmu.c | 20 ++++++++++++
arch/x86/kvm/svm/sev.c | 57 +++++++++++++++++++++++++++++++++
arch/x86/kvm/svm/svm.c | 2 ++
arch/x86/kvm/svm/svm.h | 2 ++
5 files changed, 84 insertions(+)
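For context, the full 64-bit #NPF error code reaches kvm_mmu_page_fault()
from the VMCB exit information; a simplified excerpt of the existing
npf_interception() in svm.c (not part of this patch):

  static int npf_interception(struct kvm_vcpu *vcpu)
  {
          struct vcpu_svm *svm = to_svm(vcpu);
          u64 fault_address = svm->vmcb->control.exit_info_2;
          u64 error_code = svm->vmcb->control.exit_info_1;

          trace_kvm_page_fault(fault_address, error_code);
          return kvm_mmu_page_fault(vcpu, fault_address, error_code,
                          static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
                          svm->vmcb->control.insn_bytes : NULL,
                          svm->vmcb->control.insn_len);
  }

Since exit_info_1 carries the hardware-provided error code unmodified,
the new PFERR_GUEST_RMP_MASK check can be made before any truncation.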
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 46323af09995..117e2e08d7ed 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1399,6 +1399,9 @@ struct kvm_x86_ops {
void (*write_page_begin)(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn);
void (*write_page_end)(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn);
+
+ int (*handle_rmp_page_fault)(struct kvm_vcpu *vcpu, gpa_t gpa, kvm_pfn_t pfn,
+ int level, u64 error_code);
};
struct kvm_x86_nested_ops {
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e60f54455cdc..b6a676ba1862 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5096,6 +5096,18 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
write_unlock(&vcpu->kvm->mmu_lock);
}
+static int handle_rmp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
+{
+ kvm_pfn_t pfn;
+ int level;
+
+ if (unlikely(!kvm_mmu_get_tdp_walk(vcpu, gpa, &pfn, &level)))
+ return RET_PF_RETRY;
+
+ kvm_x86_ops.handle_rmp_page_fault(vcpu, gpa, pfn, level, error_code);
+ return RET_PF_RETRY;
+}
+
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len)
{
@@ -5112,6 +5124,14 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
goto emulate;
}
+ if (unlikely(error_code & PFERR_GUEST_RMP_MASK)) {
+ r = handle_rmp_page_fault(vcpu, cr2_or_gpa, error_code);
+ if (r == RET_PF_RETRY)
+ return 1;
+ else
+ return r;
+ }
+
if (r == RET_PF_INVALID) {
r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
lower_32_bits(error_code), false);
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index d30419e91288..5b033d4c3b92 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3497,3 +3497,60 @@ void sev_snp_write_page_begin(struct kvm *kvm, struct kvm_memory_slot *slot, gfn
BUG_ON(rc != 0);
}
}
+
+int snp_handle_rmp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, kvm_pfn_t pfn,
+ int level, u64 error_code)
+{
+ struct rmpentry *e;
+ int rlevel, rc = 0;
+ bool private;
+ gfn_t gfn;
+
+ e = snp_lookup_page_in_rmptable(pfn_to_page(pfn), &rlevel);
+ if (!e)
+ return 1;
+
+ private = !!(error_code & PFERR_GUEST_ENC_MASK);
+
+ /*
+ * See APM2 section 15.36.11 on how to handle RMP faults for large pages.
+ *
+ * npt    rmp    access    action
+ * --------------------------------------------------
+ * 4k     2M     C=1       psmash
+ * x      x      C=1       if the page is not private, add a new RMP entry
+ * x      x      C=0       if the page is private, make it shared
+ * 2M     4k     C=x       zap
+ */
+ if ((error_code & PFERR_GUEST_SIZEM_MASK) ||
+ ((level == PG_LEVEL_4K) && (rlevel == PG_LEVEL_2M) && private)) {
+ rc = snp_rmptable_psmash(vcpu, pfn);
+ goto zap_gfn;
+ }
+
+ /*
+ * If it's a private access and the page is not assigned in the RMP
+ * table, create a new private RMP entry.
+ */
+ if (!rmpentry_assigned(e) && private) {
+ rc = snp_make_page_private(vcpu, gpa, pfn, PG_LEVEL_4K);
+ goto zap_gfn;
+ }
+
+ /*
+ * If it's a shared access, then make the page shared in the RMP table.
+ */
+ if (rmpentry_assigned(e) && !private)
+ rc = snp_make_page_shared(vcpu, gpa, pfn, PG_LEVEL_4K);
+
+zap_gfn:
+ /*
+ * Now that the RMP page size has been updated, zap the existing rmaps
+ * for the 2M range so that the nested page table gets rebuilt with the
+ * updated RMP page size.
+ */
+ gfn = gpa_to_gfn(gpa) & ~(KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) - 1);
+ kvm_zap_gfn_range(vcpu->kvm, gfn, gfn + KVM_PAGES_PER_HPAGE(PG_LEVEL_2M));
+
+ return rc;
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 4ff6fc86dd18..32e35d396508 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4579,6 +4579,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.get_tdp_max_page_level = sev_get_tdp_max_page_level,
.write_page_begin = sev_snp_write_page_begin,
+
+ .handle_rmp_page_fault = snp_handle_rmp_page_fault,
};
static struct kvm_x86_init_ops svm_init_ops __initdata = {
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index e0276ad8a1ae..ccdaaa4e1fb1 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -577,6 +577,8 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
int sev_get_tdp_max_page_level(struct kvm_vcpu *vcpu, gpa_t gpa, int max_level);
void sev_snp_write_page_begin(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn);
+int snp_handle_rmp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, kvm_pfn_t pfn,
+ int level, u64 error_code);
/* vmenter.S */
--
2.17.1