[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <acc6c80aad1ed2e60f7eb35f92c589d57beb2636.1606633738.git.ashish.kalra@amd.com>
Date: Mon, 30 Nov 2020 23:33:14 +0000
From: Ashish Kalra <Ashish.Kalra@....com>
To: pbonzini@...hat.com
Cc: tglx@...utronix.de, mingo@...hat.com, hpa@...or.com,
rkrcmar@...hat.com, joro@...tes.org, bp@...e.de,
thomas.lendacky@....com, x86@...nel.org, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, srutherford@...gle.com,
brijesh.singh@....com, dovmurik@...ux.vnet.ibm.com, tobin@....com,
jejb@...ux.ibm.com, frankeh@...ibm.com, dgilbert@...hat.com
Subject: [PATCH 6/9] KVM: SVM: Add support for static allocation of unified Page Encryption Bitmap.
From: Ashish Kalra <ashish.kalra@....com>
Add support for static allocation of the unified Page encryption bitmap by
extending kvm_arch_commit_memory_region() callback to add svm specific x86_ops
which can read the userspace provided memory region/memslots and calculate
the amount of guest RAM managed by KVM and grow the bitmap based
on that information, i.e. the highest guest PA that is mapped by a memslot.
Signed-off-by: Ashish Kalra <ashish.kalra@....com>
---
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/svm/sev.c | 35 +++++++++++++++++++++++++++++++++
arch/x86/kvm/svm/svm.c | 1 +
arch/x86/kvm/svm/svm.h | 1 +
arch/x86/kvm/x86.c | 5 +++++
5 files changed, 43 insertions(+)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 352ebc576036..91fc22d793e8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1282,6 +1282,7 @@ struct kvm_x86_ops {
void (*migrate_timers)(struct kvm_vcpu *vcpu);
void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
+ void (*commit_memory_region)(struct kvm *kvm, enum kvm_mr_change change);
int (*page_enc_status_hc)(struct kvm *kvm, unsigned long gpa,
unsigned long sz, unsigned long mode);
int (*get_page_enc_bitmap)(struct kvm *kvm,
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 9fe9fba34e68..37cf12cfbde6 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -957,6 +957,41 @@ static int sev_resize_page_enc_bitmap(struct kvm *kvm, unsigned long new_size)
return 0;
}
+void svm_commit_memory_region(struct kvm *kvm, enum kvm_mr_change change)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+ gfn_t start, end = 0;
+
+ spin_lock(&kvm->mmu_lock);
+ if (change == KVM_MR_CREATE) {
+ slots = kvm_memslots(kvm);
+ kvm_for_each_memslot(memslot, slots) {
+ start = memslot->base_gfn;
+ end = memslot->base_gfn + memslot->npages;
+ /*
+ * KVM memslots is a sorted list, starting with
+ * the highest mapped guest PA, so pick the topmost
+ * valid guest PA.
+ */
+ if (memslot->npages)
+ break;
+ }
+ }
+ spin_unlock(&kvm->mmu_lock);
+
+ if (end) {
+ /*
+ * NOTE: This callback is invoked in vm ioctl
+ * set_user_memory_region, hence we can use a
+ * mutex here.
+ */
+ mutex_lock(&kvm->lock);
+ sev_resize_page_enc_bitmap(kvm, end);
+ mutex_unlock(&kvm->lock);
+ }
+}
+
int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
unsigned long npages, unsigned long enc)
{
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 6ebdf20773ea..7aa7858c8209 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4313,6 +4313,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.msr_filter_changed = svm_msr_filter_changed,
+ .commit_memory_region = svm_commit_memory_region,
.page_enc_status_hc = svm_page_enc_status_hc,
.get_page_enc_bitmap = svm_get_page_enc_bitmap,
.set_page_enc_bitmap = svm_set_page_enc_bitmap,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 2268c0ab650b..5a4656bad681 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -415,6 +415,7 @@ int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
unsigned long npages, unsigned long enc);
int svm_get_page_enc_bitmap(struct kvm *kvm, struct kvm_page_enc_bitmap *bmap);
int svm_set_page_enc_bitmap(struct kvm *kvm, struct kvm_page_enc_bitmap *bmap);
+void svm_commit_memory_region(struct kvm *kvm, enum kvm_mr_change change);
extern struct kvm_x86_nested_ops svm_nested_ops;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3cf64a94004f..c1acbd397b50 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10717,6 +10717,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
/* Free the arrays associated with the old memslot. */
if (change == KVM_MR_MOVE)
kvm_arch_free_memslot(kvm, old);
+
+ if (change == KVM_MR_CREATE || change == KVM_MR_DELETE) {
+ if (kvm_x86_ops.commit_memory_region)
+ kvm_x86_ops.commit_memory_region(kvm, change);
+ }
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
--
2.17.1
Powered by blists - more mailing lists