Message-ID: <20230220183847.59159-49-michael.roth@amd.com>
Date: Mon, 20 Feb 2023 12:38:39 -0600
From: Michael Roth <michael.roth@....com>
To: <kvm@...r.kernel.org>
CC: <linux-coco@...ts.linux.dev>, <linux-mm@...ck.org>,
<linux-crypto@...r.kernel.org>, <x86@...nel.org>,
<linux-kernel@...r.kernel.org>, <tglx@...utronix.de>,
<mingo@...hat.com>, <jroedel@...e.de>, <thomas.lendacky@....com>,
<hpa@...or.com>, <ardb@...nel.org>, <pbonzini@...hat.com>,
<seanjc@...gle.com>, <vkuznets@...hat.com>, <jmattson@...gle.com>,
<luto@...nel.org>, <dave.hansen@...ux.intel.com>, <slp@...hat.com>,
<pgonda@...gle.com>, <peterz@...radead.org>,
<srinivas.pandruvada@...ux.intel.com>, <rientjes@...gle.com>,
<dovmurik@...ux.ibm.com>, <tobin@....com>, <bp@...en8.de>,
<vbabka@...e.cz>, <kirill@...temov.name>, <ak@...ux.intel.com>,
<tony.luck@...el.com>, <marcorr@...gle.com>,
<sathyanarayanan.kuppuswamy@...ux.intel.com>,
<alpergun@...gle.com>, <dgilbert@...hat.com>, <jarkko@...nel.org>,
<ashish.kalra@....com>, <nikunj.dadhania@....com>
Subject: [PATCH RFC v8 48/56] KVM: SVM: Add SNP-specific handling for memory attribute updates
This will handle RMP table updates and direct map changes needed for
page state conversions requested by userspace.
Signed-off-by: Michael Roth <michael.roth@....com>
---
arch/x86/kvm/svm/sev.c | 126 +++++++++++++++++++++++++++++++++++++++++
arch/x86/kvm/svm/svm.c | 1 +
arch/x86/kvm/svm/svm.h | 2 +
3 files changed, 129 insertions(+)
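
For reference, here is a minimal userspace sketch of the order -> page-level
mapping that sev_update_mem_attr() below relies on. The PG_LEVEL_* values and
the KVM_HPAGE_GFN_SHIFT() definition are local stand-ins assumed to match the
kernel's x86 definitions, so the logic can be compiled and exercised outside
the kernel; this is illustrative only, not part of the patch:

  #include <stdio.h>

  /* Local stand-ins, assumed to match the arch/x86 definitions. */
  enum pg_level { PG_LEVEL_4K = 1, PG_LEVEL_2M = 2, PG_LEVEL_1G = 3 };

  /* GFNs are 4K-granular; each level adds 9 bits (512 entries per level). */
  #define KVM_HPAGE_GFN_SHIFT(level) (((level) - PG_LEVEL_4K) * 9)

  static int order_to_level(int order)
  {
          if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
                  return PG_LEVEL_1G;
          if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
                  return PG_LEVEL_2M;
          return PG_LEVEL_4K;
  }

  int main(void)
  {
          /* e.g. THP-style allocation orders: 0 (4K), 9 (2M), 18 (1G) */
          for (int order = 0; order <= 18; order += 9)
                  printf("order %2d -> PG_LEVEL %d\n", order, order_to_level(order));
          return 0;
  }

The resulting level decides the RMP-entry granularity: whether a
shared->private conversion can be done with a single 2MB RMP entry or must
fall back to 4K entries, and, in the private->shared direction, whether an
existing 2MB entry has to be PSMASH'd into 4K entries first.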
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index b2f1a12685ed..73d614c538da 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3381,6 +3381,31 @@ static int snp_rmptable_psmash(struct kvm *kvm, kvm_pfn_t pfn)
 	return psmash(pfn);
 }
 
+static int snp_make_page_shared(struct kvm *kvm, gpa_t gpa, kvm_pfn_t pfn, int level)
+{
+	int rc, rmp_level;
+
+	rc = snp_lookup_rmpentry(pfn, &rmp_level);
+	if (rc < 0)
+		return -EINVAL;
+
+	/* If the page is not assigned then do nothing. */
+	if (!rc)
+		return 0;
+
+	/*
+	 * If the page is part of an existing 2MB RMP entry, split the 2MB
+	 * region into 4K pages before making the memory shared.
+	 */
+	if (level == PG_LEVEL_4K && rmp_level == PG_LEVEL_2M) {
+		rc = snp_rmptable_psmash(kvm, pfn);
+		if (rc)
+			return rc;
+	}
+
+	return rmp_make_shared(pfn, level);
+}
+
 /*
  * TODO: need to get the value set by userspace in vcpu->run->vmgexit.ghcb_msr
  * and process that here accordingly.
@@ -4373,3 +4398,104 @@ void handle_rmp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
 	kvm_zap_gfn_range(kvm, gfn, gfn + PTRS_PER_PMD);
 	put_page(pfn_to_page(pfn));
 }
+
+static inline u8 order_to_level(int order)
+{
+	BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
+
+	if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
+		return PG_LEVEL_1G;
+
+	if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
+		return PG_LEVEL_2M;
+
+	return PG_LEVEL_4K;
+}
+
+int sev_update_mem_attr(struct kvm_memory_slot *slot, unsigned int attr,
+			gfn_t start, gfn_t end)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(slot->kvm)->sev_info;
+	enum psc_op op = (attr & KVM_MEMORY_ATTRIBUTE_PRIVATE) ? SNP_PAGE_STATE_PRIVATE
+							       : SNP_PAGE_STATE_SHARED;
+	gfn_t gfn = start;
+
+	pr_debug("%s: GFN 0x%llx - 0x%llx, op: %d\n", __func__, start, end, op);
+
+	if (!sev_snp_guest(slot->kvm))
+		return 0;
+
+	if (!kvm_slot_can_be_private(slot)) {
+		pr_err_ratelimited("%s: memslot for gfn: 0x%llx is not private.\n",
+				   __func__, gfn);
+		return -EPERM;
+	}
+
+	while (gfn < end) {
+		kvm_pfn_t pfn;
+		int level = PG_LEVEL_4K; /* TODO: take actual order into account */
+		gpa_t gpa = gfn_to_gpa(gfn);
+		int npages = 1;
+		int order;
+		int rc;
+
+		/*
+		 * No work to do if there was never a page allocated from private
+		 * memory. If there was a page that was deallocated previously,
+		 * the invalidation notifier should have restored the page to
+		 * shared.
+		 */
+		rc = kvm_restrictedmem_get_pfn(slot, gfn, &pfn, &order);
+		if (rc) {
+			pr_warn_ratelimited("%s: failed to retrieve gfn 0x%llx from private FD\n",
+					    __func__, gfn);
+			gfn++;
+			continue;
+		}
+
+		/*
+		 * TODO: The RMP entry's hugepage bit is ignored for
+		 * shared/unassigned pages. Either handle looping through each
+		 * sub-page as part of snp_make_page_shared(), or remove the
+		 * level argument.
+		 */
+		if (op == SNP_PAGE_STATE_PRIVATE && order &&
+		    IS_ALIGNED(gfn, 1 << order) && (gfn + (1 << order)) <= end) {
+			level = order_to_level(order);
+			npages = 1 << order;
+		}
+
+		/*
+		 * Grab the PFN from the private memslot and update the RMP
+		 * entry. It may be worthwhile to go ahead and map it into the
+		 * TDP at this point if the guest is doing lazy acceptance, but
+		 * for up-front bulk shared->private conversions it's not likely
+		 * the guest will try to access the PFN any time soon, so for
+		 * now just let the KVM MMU handle faulting it in on the next
+		 * access.
+		 */
+		switch (op) {
+		case SNP_PAGE_STATE_SHARED:
+			rc = snp_make_page_shared(slot->kvm, gpa, pfn, level);
+			break;
+		case SNP_PAGE_STATE_PRIVATE:
+			rc = rmp_make_private(pfn, gpa, level, sev->asid, false);
+			break;
+		default:
+			rc = PSC_INVALID_ENTRY;
+			break;
+		}
+
+		put_page(pfn_to_page(pfn));
+
+		if (rc) {
+			pr_err_ratelimited("%s: failed op %d gpa %llx pfn %llx level %d rc %d\n",
+					   __func__, op, gpa, pfn, level, rc);
+			return -EINVAL;
+		}
+
+		gfn += npages;
+	}
+
+	return 0;
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 539926b07ee5..e2edc4700e55 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4860,6 +4860,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
 	.alloc_apic_backing_page = svm_alloc_apic_backing_page,
 	.adjust_mapping_level = sev_adjust_mapping_level,
+	.update_mem_attr = sev_update_mem_attr,
 };
 
 /*
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 37bd7b728d52..50a2bcaf3fd7 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -725,6 +725,8 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
 void sev_adjust_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int *level);
 void handle_rmp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
 void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
+int sev_update_mem_attr(struct kvm_memory_slot *slot, unsigned int attr,
+			gfn_t start, gfn_t end);
 
 /* vmenter.S */
--
2.25.1