Message-Id: <20171020023413.122280-33-brijesh.singh@amd.com>
Date:   Thu, 19 Oct 2017 21:34:07 -0500
From:   Brijesh Singh <brijesh.singh@....com>
To:     kvm@...r.kernel.org
Cc:     bp@...en8.de, Brijesh Singh <brijesh.singh@....com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>,
        "H. Peter Anvin" <hpa@...or.com>,
        Paolo Bonzini <pbonzini@...hat.com>,
        Radim Krčmář <rkrcmar@...hat.com>,
        Joerg Roedel <joro@...tes.org>, Borislav Petkov <bp@...e.de>,
        Tom Lendacky <thomas.lendacky@....com>, x86@...nel.org,
        linux-kernel@...r.kernel.org
Subject: [Part2 PATCH v6 32/38] KVM: SVM: Add support for SEV DEBUG_DECRYPT command

The SEV DEBUG_DECRYPT command is used to decrypt a guest memory region for
debug purposes.

Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>
Cc: "Radim Krčmář" <rkrcmar@...hat.com>
Cc: Joerg Roedel <joro@...tes.org>
Cc: Borislav Petkov <bp@...e.de>
Cc: Tom Lendacky <thomas.lendacky@....com>
Cc: x86@...nel.org
Cc: kvm@...r.kernel.org
Cc: linux-kernel@...r.kernel.org
Signed-off-by: Brijesh Singh <brijesh.singh@....com>
---
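A minimal userspace sketch (not part of the patch) of how the new command is
expected to be driven through the KVM_MEMORY_ENCRYPT_OP ioctl, for reviewers
who want to exercise it. The struct layouts match the uapi definitions added
earlier in this series; fd setup and error handling are abbreviated:

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /*
   * Decrypt 'len' bytes of encrypted guest memory at 'src' into the
   * plaintext buffer 'dst'. 'vm_fd' is the KVM VM fd and 'sev_fd' is an
   * open fd to /dev/sev. Returns the ioctl result; on failure the PSP
   * status code is left in cmd.error.
   */
  static int sev_dbg_decrypt_region(int vm_fd, int sev_fd,
				    const void *src, void *dst, uint32_t len)
  {
	struct kvm_sev_dbg dbg = {
		.src_uaddr = (uintptr_t)src,
		.dst_uaddr = (uintptr_t)dst,
		.len = len,
	};
	struct kvm_sev_cmd cmd = {
		.id = KVM_SEV_DBG_DECRYPT,
		.data = (uintptr_t)&dbg,
		.sev_fd = sev_fd,
	};

	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
  }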
 arch/x86/kvm/svm.c | 179 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 179 insertions(+)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 83a4d0406a4b..f19c4fb2fdc8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6023,6 +6023,182 @@ static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return ret;
 }
 
+static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
+			       unsigned long dst, int size,
+			       int *error, bool enc)
+{
+	struct kvm_sev_info *sev = &kvm->arch.sev_info;
+	struct sev_data_dbg *data;
+	int ret;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->handle = sev->handle;
+	data->dst_addr = dst;
+	data->src_addr = src;
+	data->len = size;
+
+	ret = sev_issue_cmd(kvm,
+			    enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
+			    data, error);
+	kfree(data);
+	return ret;
+}
+
+/*
+ * Decrypt source memory into a userspace or kernel buffer. If the destination
+ * buffer or the length is not 16-byte aligned, an intermediate buffer is used.
+ */
+static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long paddr,
+			     unsigned long __user dst_uaddr,
+			     unsigned long dst_kaddr, unsigned long dst_paddr,
+			     int size, int *error)
+{
+	int ret, offset = 0, len = size;
+	struct page *tpage = NULL;
+
+	/*
+	 * The debug command works on 16-byte aligned inputs; check whether
+	 * src, dst and len are all 16-byte aligned. If one of them is not,
+	 * we decrypt more than requested into a temporary buffer and copy
+	 * the requested portion of the data into the destination buffer.
+	 */
+	if (!IS_ALIGNED(paddr,     16) ||
+	    !IS_ALIGNED(dst_paddr, 16) ||
+	    !IS_ALIGNED(size,      16)) {
+		tpage = alloc_page(GFP_KERNEL);
+		if (!tpage)
+			return -ENOMEM;
+
+		dst_paddr = __sme_page_pa(tpage);
+
+		/*
+		 * If the source buffer is not aligned, the offset is used
+		 * when copying the data from the temporary buffer into the
+		 * destination buffer.
+		 */
+		offset = paddr & 15;
+
+		/* It's safe to read more than the requested size. */
+		len = round_up(size + offset, 16);
+
+		paddr = round_down(paddr, 16);
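+		/*
+		 * Worked example with hypothetical values: for paddr = 0x1013
+		 * and size = 20, offset = 0x1013 & 15 = 3, len =
+		 * round_up(20 + 3, 16) = 32 and paddr is rounded down to
+		 * 0x1010, so 32 bytes are decrypted and the 20 requested
+		 * bytes start at offset 3 in the temporary buffer.
+		 */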
+
+		/*
+		 * The temporary buffer may be mapped with C=0 or C=1 on the
+		 * x86 side, but the PSP will write the memory region with C=0.
+		 * Let's make sure the x86 cache for this memory range is
+		 * flushed so that we see the new contents after the command
+		 * completes.
+		 */
+		clflush_cache_range(page_address(tpage), PAGE_SIZE);
+	}
+
+	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, false);
+
+	/*
+	 * If a temporary buffer was used, copy the data from the temporary
+	 * buffer into the destination buffer.
+	 */
+	if (!ret && tpage) {
+		/*
+		 * If the destination is a userspace buffer, use copy_to_user(),
+		 * otherwise memcpy().
+		 */
+		if (dst_uaddr) {
+			if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
+					 page_address(tpage) + offset, size))
+				ret = -EFAULT;
+		} else {
+			memcpy((void *)dst_kaddr, page_address(tpage) + offset, size);
+		}
+	}
+
+	if (tpage)
+		__free_page(tpage);
+
+	return ret;
+}
+
+static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+{
+	unsigned long vaddr, vaddr_end, next_vaddr;
+	unsigned long dst_vaddr, dst_vaddr_end;
+	struct page **src_p, **dst_p;
+	struct kvm_sev_dbg debug;
+	unsigned long n;
+	int ret = 0, size;
+
+	if (!sev_guest(kvm))
+		return -ENOTTY;
+
+	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
+		return -EFAULT;
+
+	vaddr = debug.src_uaddr;
+	size = debug.len;
+	vaddr_end = vaddr + size;
+	dst_vaddr = debug.dst_uaddr;
+	dst_vaddr_end = dst_vaddr + size;
+
+	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
+		int len, s_off, d_off;
+
+		/* pin the userspace source and destination pages */
+		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
+		if (!src_p)
+			return -EFAULT;
+
+		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
+		if (!dst_p) {
+			sev_unpin_memory(kvm, src_p, n);
+			return -EFAULT;
+		}
+
+		/*
+		 * Depending on the operation type (encrypt or decrypt), the
+		 * PSP will access the source and destination buffers with
+		 * C=0 or C=1. Let's make sure the caches are flushed so that
+		 * the data is accessed with the correct C-bit.
+		 */
+		sev_clflush_pages(src_p, 1);
+		sev_clflush_pages(dst_p, 1);
+
+		/*
+		 * Since the user buffers may not be page aligned, calculate
+		 * the offset within the page.
+		 */
+		s_off = vaddr & ~PAGE_MASK;
+		d_off = dst_vaddr & ~PAGE_MASK;
+		len = min_t(size_t, (PAGE_SIZE - s_off), size);
+
+		ret = __sev_dbg_decrypt(kvm,
+				       __sme_page_pa(src_p[0]) + s_off,
+				       dst_vaddr, 0,
+				       __sme_page_pa(dst_p[0]) + d_off,
+				       len, &argp->error);
+
+		sev_unpin_memory(kvm, src_p, 1);
+		sev_unpin_memory(kvm, dst_p, 1);
+
+		if (ret)
+			goto err;
+
+		next_vaddr = vaddr + len;
+		dst_vaddr = dst_vaddr + len;
+		size -= len;
+	}
+err:
+	return ret;
+}
+
+static int sev_dbg_decrypt(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	return sev_dbg_crypt(kvm, argp, true);
+}
+
 static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_sev_cmd sev_cmd;
@@ -6055,6 +6231,9 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 	case KVM_SEV_GUEST_STATUS:
 		r = sev_guest_status(kvm, &sev_cmd);
 		break;
+	case KVM_SEV_DBG_DECRYPT:
+		r = sev_dbg_decrypt(kvm, &sev_cmd);
+		break;
 	default:
 		r = -EINVAL;
 		goto out;
-- 
2.9.5
