Message-Id: <a6a3826234bebe908290bb3f6de35356f44acbb9.1612398155.git.ashish.kalra@amd.com>
Date: Thu, 4 Feb 2021 00:40:48 +0000
From: Ashish Kalra <Ashish.Kalra@....com>
To: pbonzini@...hat.com
Cc: tglx@...utronix.de, mingo@...hat.com, hpa@...or.com,
rkrcmar@...hat.com, joro@...tes.org, bp@...e.de,
thomas.lendacky@....com, x86@...nel.org, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, srutherford@...gle.com,
seanjc@...gle.com, venu.busireddy@...cle.com, brijesh.singh@....com
Subject: [PATCH v10 16/16] KVM: SVM: Bypass DBG_DECRYPT API calls for unencrypted guest memory.
From: Ashish Kalra <ashish.kalra@....com>
Ensure that DBG_DECRYPT API calls are bypassed for all unencrypted
guest memory regions, such as S/W IOTLB bounce buffers and guest
regions marked as "__bss_decrypted". The encryption status of these
guest memory regions is determined by consulting the shared pages
list.
Signed-off-by: Ashish Kalra <ashish.kalra@....com>
---
arch/x86/kvm/svm/sev.c | 126 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 126 insertions(+)
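
For reference (not part of the patch): the DBG_DECRYPT path being
short-circuited here is reached from userspace through the
KVM_MEMORY_ENCRYPT_OP ioctl on the VM fd. Below is a minimal,
illustrative caller sketch using the uapi definitions from
<linux/kvm.h>; the fd handling and addresses are placeholders and the
helper name is made up for the example.

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/*
 * Illustrative only: ask KVM to debug-decrypt 'len' bytes of guest
 * memory at userspace address 'src' into the caller buffer 'dst'.
 * With this patch applied, ranges the guest has marked as
 * shared/unencrypted are copied directly and the SEV DBG_DECRYPT
 * firmware command is skipped.
 */
static int sev_dbg_decrypt(int vm_fd, int sev_fd,
			   void *src, void *dst, uint32_t len)
{
	struct kvm_sev_dbg dbg = {
		.src_uaddr = (uint64_t)(uintptr_t)src,
		.dst_uaddr = (uint64_t)(uintptr_t)dst,
		.len = len,
	};
	struct kvm_sev_cmd cmd = {
		.id = KVM_SEV_DBG_DECRYPT,
		.data = (uint64_t)(uintptr_t)&dbg,
		.sev_fd = (uint32_t)sev_fd,
	};

	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}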
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 93f42b3d3e33..fa3fbbb73b33 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -888,6 +888,117 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
 	return ret;
 }
 
+static struct kvm_memory_slot *hva_to_memslot(struct kvm *kvm,
+					       unsigned long hva)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *memslot;
+
+	kvm_for_each_memslot(memslot, slots) {
+		if (hva >= memslot->userspace_addr &&
+		    hva < memslot->userspace_addr +
+		    (memslot->npages << PAGE_SHIFT))
+			return memslot;
+	}
+
+	return NULL;
+}
+
+static bool hva_to_gfn(struct kvm *kvm, unsigned long hva, gfn_t *gfn)
+{
+	struct kvm_memory_slot *memslot;
+	gpa_t gpa_offset;
+
+	memslot = hva_to_memslot(kvm, hva);
+	if (!memslot)
+		return false;
+
+	gpa_offset = hva - memslot->userspace_addr;
+	*gfn = ((memslot->base_gfn << PAGE_SHIFT) + gpa_offset) >> PAGE_SHIFT;
+
+	return true;
+}
+
+static bool is_unencrypted_region(gfn_t gfn_start, gfn_t gfn_end,
+				  struct list_head *head)
+{
+	struct shared_region *pos;
+
+	list_for_each_entry(pos, head, list)
+		if (gfn_start >= pos->gfn_start &&
+		    gfn_end <= pos->gfn_end)
+			return true;
+
+	return false;
+}
+
+static int handle_unencrypted_region(struct kvm *kvm,
+				     unsigned long vaddr,
+				     unsigned long vaddr_end,
+				     unsigned long dst_vaddr,
+				     unsigned int size,
+				     bool *is_decrypted)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct page *page = NULL;
+	gfn_t gfn_start, gfn_end;
+	int len, s_off, d_off;
+	int srcu_idx;
+	int ret = 0;
+
+	/* ensure hva_to_gfn translations remain valid */
+	srcu_idx = srcu_read_lock(&kvm->srcu);
+
+	if (!hva_to_gfn(kvm, vaddr, &gfn_start)) {
+		srcu_read_unlock(&kvm->srcu, srcu_idx);
+		return -EINVAL;
+	}
+
+	if (!hva_to_gfn(kvm, vaddr_end, &gfn_end)) {
+		srcu_read_unlock(&kvm->srcu, srcu_idx);
+		return -EINVAL;
+	}
+
+	if (sev->shared_pages_list_count) {
+		if (is_unencrypted_region(gfn_start, gfn_end,
+					  &sev->shared_pages_list)) {
+			page = alloc_page(GFP_KERNEL);
+			if (!page) {
+				srcu_read_unlock(&kvm->srcu, srcu_idx);
+				return -ENOMEM;
+			}
+
+			/*
+			 * Since user buffer may not be page aligned, calculate
+			 * the offset within the page.
+			 */
+			s_off = vaddr & ~PAGE_MASK;
+			d_off = dst_vaddr & ~PAGE_MASK;
+			len = min_t(size_t, (PAGE_SIZE - s_off), size);
+
+			if (copy_from_user(page_address(page),
+					   (void __user *)(uintptr_t)vaddr, len)) {
+				__free_page(page);
+				srcu_read_unlock(&kvm->srcu, srcu_idx);
+				return -EFAULT;
+			}
+
+			if (copy_to_user((void __user *)(uintptr_t)dst_vaddr,
+					 page_address(page), len)) {
+				ret = -EFAULT;
+			}
+
+			__free_page(page);
+			srcu_read_unlock(&kvm->srcu, srcu_idx);
+			*is_decrypted = true;
+			return ret;
+		}
+	}
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+	*is_decrypted = false;
+	return ret;
+}
+
 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 {
 	unsigned long vaddr, vaddr_end, next_vaddr;
@@ -917,6 +1028,20 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
 		int len, s_off, d_off;
 
+		if (dec) {
+			bool is_already_decrypted;
+
+			ret = handle_unencrypted_region(kvm,
+							vaddr,
+							vaddr_end,
+							dst_vaddr,
+							size,
+							&is_already_decrypted);
+
+			if (ret || is_already_decrypted)
+				goto already_decrypted;
+		}
+
 		/* lock userspace source and destination page */
 		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
 		if (IS_ERR(src_p))
@@ -961,6 +1086,7 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 		sev_unpin_memory(kvm, src_p, n);
 		sev_unpin_memory(kvm, dst_p, n);
 
+already_decrypted:
 		if (ret)
 			goto err;
 
--
2.17.1