Message-Id: <20200819151742.7892-3-Eric.VanTassell@amd.com>
Date: Wed, 19 Aug 2020 10:17:40 -0500
From: eric van tassell <Eric.VanTassell@....com>
To: kvm@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, bp@...en8.de, hpa@...or.com,
mingo@...hat.com, jmattson@...gle.com, joro@...tes.org,
pbonzini@...hat.com, sean.j.christopherson@...el.com,
tglx@...utronix.de, vkuznets@...hat.com, wanpengli@...cent.com,
x86@...nel.org, rientjes@...gle.com, junaids@...gle.com,
evantass@....com
Subject: [Patch v2 2/4] KVM:SVM: Implement pin_page support

Improve SEV guest startup time from O(n) to a constant by deferring
guest page pinning until the pages are used to satisfy nested page
faults.

Implement the pinning helper, sev_get_page(), and the pin_page hook,
sev_pin_page(), which calls it.

Track the pinned pages in an xarray so they can be released when the
guest is terminated.
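
The MMU-side call site is not part of this patch. As a rough sketch of
the intended flow, assuming only the .pin_page hook that this series
adds to kvm_x86_ops (the fault-handler name and locals below are
illustrative, not taken from the series):

	/*
	 * Hypothetical fault-path caller: once the faulting gfn has been
	 * resolved to a pfn, let vendor code pin the page before the SPTE
	 * is installed. A non-zero return aborts the mapping attempt.
	 */
	static int example_set_spte(struct kvm_vcpu *vcpu, gfn_t gfn,
				    kvm_pfn_t pfn, int level, u64 *sptep)
	{
		int r;

		if (kvm_x86_ops.pin_page) {
			r = kvm_x86_ops.pin_page(vcpu, gfn, pfn, level, sptep);
			if (r)
				return r;
		}

		/* ... build and install the SPTE as before ... */
		return 0;
	}

For an SEV guest this reaches sev_pin_page() below, which records the
page in the per-VM xarray and takes a reference; sev_pin_page() itself
returns immediately for non-SEV guests.
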
Co-developed-by: Brijesh Singh <brijesh.singh@....com>
Signed-off-by: eric van tassell <Eric.VanTassell@....com>
---
arch/x86/kvm/svm/sev.c | 68 ++++++++++++++++++++++++++++++++++++++++++
arch/x86/kvm/svm/svm.c | 2 ++
arch/x86/kvm/svm/svm.h | 3 ++
3 files changed, 73 insertions(+)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index f7f1f4ecf08e..8d56d1afb33e 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -184,6 +184,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	sev->asid = asid;
 	INIT_LIST_HEAD(&sev->regions_list);
+	xa_init(&sev->pages_xarray);
+
 	return 0;
 e_free:
@@ -415,6 +417,43 @@ static unsigned long get_num_contig_pages(unsigned long idx,
 	return pages;
 }
+static int sev_get_page(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct xarray *xa = &sev->pages_xarray;
+	struct page *page = pfn_to_page(pfn);
+	int ret;
+
+	/* store page at index = gfn */
+	ret = xa_insert(xa, gfn, page, GFP_ATOMIC);
+	if (ret == -EBUSY) {
+		/*
+		 * If xa_insert returned -EBUSY, the gfn was already
+		 * associated with a struct page *.
+		 */
+		struct page *cur_page;
+
+		cur_page = xa_load(xa, gfn);
+		/* If cur_page == page, no change is needed, so return 0 */
+		if (cur_page == page)
+			return 0;
+
+		/* Release the page that was stored at index = gfn */
+		put_page(cur_page);
+
+		/* Return result of attempting to store page at index = gfn */
+		ret = xa_err(xa_store(xa, gfn, page, GFP_ATOMIC));
+		WARN_ON(ret != 0);
+	}
+
+	if (ret)
+		return ret;
+
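+	/*
+	 * Pin the page; the reference is dropped in sev_vm_destroy() or when
+	 * a later fault maps the gfn to a different page.
+	 */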
+	get_page(page);
+
+	return 0;
+}
+
 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
 	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
@@ -1085,6 +1124,8 @@ void sev_vm_destroy(struct kvm *kvm)
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 	struct list_head *head = &sev->regions_list;
 	struct list_head *pos, *q;
+	unsigned long xa_idx;
+	struct page *xa_page;
 	if (!sev_guest(kvm))
 		return;
@@ -1109,6 +1150,12 @@ void sev_vm_destroy(struct kvm *kvm)
 		}
 	}
+	/* Release each pinned page that SEV tracked in sev->pages_xarray. */
+	xa_for_each(&sev->pages_xarray, xa_idx, xa_page) {
+		put_page(xa_page);
+	}
+	xa_destroy(&sev->pages_xarray);
+
 	mutex_unlock(&kvm->lock);
 	sev_unbind_asid(kvm, sev->handle);
@@ -1193,3 +1240,24 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
 	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 }
+
+int sev_pin_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn,
+		 int level, u64 *spte)
+{
+	int rc;
+
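+	/* Pages are only pinned for encrypted (SEV) guests. */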
+	if (!sev_guest(vcpu->kvm))
+		return 0;
+
+	rc = sev_get_page(vcpu->kvm, gfn, pfn);
+	if (rc)
+		return rc;
+
+	/*
+	 * Flush any cached lines of the page being added since "ownership" of
+	 * it will be transferred from the host to an encrypted guest.
+	 */
+	clflush_cache_range(__va(pfn << PAGE_SHIFT), page_level_size(level));
+
+	return 0;
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 535ad311ad02..adb308631416 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4130,6 +4130,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
 	.apic_init_signal_blocked = svm_apic_init_signal_blocked,
+
+	.pin_page = sev_pin_page,
 };
 static struct kvm_x86_init_ops svm_init_ops __initdata = {
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 121b198b51e9..278c46bc52aa 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -65,6 +65,7 @@ struct kvm_sev_info {
 	int fd;			/* SEV device fd */
 	unsigned long pages_locked; /* Number of pages locked */
 	struct list_head regions_list;  /* List of registered regions */
+	struct xarray pages_xarray; /* Pinned guest pages, indexed by gfn */
 };
 struct kvm_svm {
@@ -488,5 +489,7 @@ int svm_unregister_enc_region(struct kvm *kvm,
 void pre_sev_run(struct vcpu_svm *svm, int cpu);
 int __init sev_hardware_setup(void);
 void sev_hardware_teardown(void);
+int sev_pin_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn,
+		 int level, u64 *spte);
 #endif
--
2.17.1