Message-ID: <20190620163832.5451-3-brijesh.singh@amd.com>
Date: Thu, 20 Jun 2019 16:38:51 +0000
From: "Singh, Brijesh" <brijesh.singh@....com>
To: "kvm@...r.kernel.org" <kvm@...r.kernel.org>
CC: "Singh, Brijesh" <brijesh.singh@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Radim Krčmář <rkrcmar@...hat.com>,
Joerg Roedel <joro@...tes.org>, Borislav Petkov <bp@...e.de>,
"Lendacky, Thomas" <Thomas.Lendacky@....com>,
"x86@...nel.org" <x86@...nel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: [RFC PATCH v2 02/11] KVM: SVM: Add KVM_SEND_UPDATE_DATA command
The command is used for encrypting the guest memory region using the encryption
context created with KVM_SEV_SEND_START.
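For reference, below is a minimal userspace sketch (not part of this patch) of the
expected call flow. It assumes vm_fd is the VM file descriptor, sev_fd is an open
/dev/sev fd, KVM_SEV_SEND_START has already succeeded, and the helper name
sev_send_one_page is illustrative only. The first KVM_MEMORY_ENCRYPT_OP call passes
zero hdr_len/trans_len so the kernel returns the buffer sizes required by the
firmware; the second call encrypts a single guest page (error handling and buffer
freeing are omitted for brevity):

  #include <linux/kvm.h>
  #include <stdint.h>
  #include <stdlib.h>
  #include <sys/ioctl.h>

  static int sev_send_one_page(int vm_fd, int sev_fd, void *guest_page,
                               uint32_t guest_len)
  {
          struct kvm_sev_send_update_data upd = { 0 };
          struct kvm_sev_cmd cmd = {
                  .id = KVM_SEV_SEND_UPDATE_DATA,
                  .data = (uintptr_t)&upd,
                  .sev_fd = sev_fd,
          };
          void *hdr, *trans;

          /* Query pass: zero lengths, the firmware reports the required sizes. */
          ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
          if (!upd.hdr_len || !upd.trans_len)
                  return -1;

          hdr = malloc(upd.hdr_len);
          trans = malloc(upd.trans_len);
          if (!hdr || !trans)
                  return -1;

          upd.hdr_uaddr = (uintptr_t)hdr;
          upd.trans_uaddr = (uintptr_t)trans;
          upd.guest_uaddr = (uintptr_t)guest_page;
          upd.guest_len = guest_len;      /* must not cross a page boundary */

          /* Real pass: on success, hdr and trans hold the packet header and the
           * encrypted page to be transmitted to the migration target. */
          return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
  }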
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>
Cc: "Radim Krčmář" <rkrcmar@...hat.com>
Cc: Joerg Roedel <joro@...tes.org>
Cc: Borislav Petkov <bp@...e.de>
Cc: Tom Lendacky <thomas.lendacky@....com>
Cc: x86@...nel.org
Cc: kvm@...r.kernel.org
Cc: linux-kernel@...r.kernel.org
Signed-off-by: Brijesh Singh <brijesh.singh@....com>
---
.../virtual/kvm/amd-memory-encryption.rst | 24 ++++
arch/x86/kvm/svm.c | 120 +++++++++++++++++-
include/uapi/linux/kvm.h | 9 ++
3 files changed, 149 insertions(+), 4 deletions(-)
diff --git a/Documentation/virtual/kvm/amd-memory-encryption.rst b/Documentation/virtual/kvm/amd-memory-encryption.rst
index 9ea974c87980..ea881f21bc60 100644
--- a/Documentation/virtual/kvm/amd-memory-encryption.rst
+++ b/Documentation/virtual/kvm/amd-memory-encryption.rst
@@ -265,6 +265,30 @@ Returns: 0 on success, -negative on error
__u32 session_len;
};
+11. KVM_SEV_SEND_UPDATE_DATA
+----------------------------
+
+The KVM_SEV_SEND_UPDATE_DATA command can be used by the hypervisor to encrypt the
+outgoing guest memory region with the encryption context created using
+KVM_SEV_SEND_START.
+
+Parameters (in): struct kvm_sev_send_update_data
+
+Returns: 0 on success, -negative on error
+
+::
+
+ struct kvm_sev_send_update_data {
+ __u64 hdr_uaddr; /* userspace address containing the packet header */
+ __u32 hdr_len;
+
+ __u64 guest_uaddr; /* the source memory region to be encrypted */
+ __u32 guest_len;
+
+ __u64 trans_uaddr; /* the destination memory region */
+ __u32 trans_len;
+ };
+
References
==========
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 98e5a6c2bacc..de353664ea22 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -421,6 +421,7 @@ enum {
static unsigned int max_sev_asid;
static unsigned int min_sev_asid;
+static unsigned long sev_me_mask;
static unsigned long *sev_asid_bitmap;
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
@@ -1219,16 +1220,21 @@ static int avic_ga_log_notifier(u32 ga_tag)
static __init int sev_hardware_setup(void)
{
struct sev_user_data_status *status;
+ unsigned int eax, ebx;
int rc;
- /* Maximum number of encrypted guests supported simultaneously */
- max_sev_asid = cpuid_ecx(0x8000001F);
+ /*
+ * Query the memory encryption information.
+ * EBX: Bits 5:0 - page table bit position used to indicate encryption (aka C-bit).
+ * ECX: Maximum number of encrypted guests supported simultaneously.
+ * EDX: Minimum ASID value that should be used for SEV guest.
+ */
+ cpuid(0x8000001f, &eax, &ebx, &max_sev_asid, &min_sev_asid);
if (!max_sev_asid)
return 1;
- /* Minimum ASID value that should be used for SEV guest */
- min_sev_asid = cpuid_edx(0x8000001F);
+ sev_me_mask = 1UL << (ebx & 0x3f);
/* Initialize SEV ASID bitmap */
sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
@@ -7062,6 +7068,109 @@ static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
return ret;
}
+static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_send_update_data *data;
+ struct kvm_sev_send_update_data params;
+ void *hdr = NULL, *trans_data = NULL;
+ struct page **guest_page = NULL;
+ unsigned long n;
+ int ret, offset;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+ sizeof(struct kvm_sev_send_update_data)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* userspace wants to query either header or trans length */
+ if (!params.trans_len || !params.hdr_len)
+ goto cmd;
+
+ ret = -EINVAL;
+ if (!params.trans_uaddr || !params.guest_uaddr ||
+ !params.guest_len || !params.hdr_uaddr)
+ goto e_free;
+
+ /* Check if we are crossing the page boundary */
+ ret = -EINVAL;
+ offset = params.guest_uaddr & (PAGE_SIZE - 1);
+ if (params.guest_len + offset > PAGE_SIZE)
+ goto e_free;
+
+ ret = -ENOMEM;
+ hdr = kmalloc(params.hdr_len, GFP_KERNEL);
+ if (!hdr)
+ goto e_free;
+
+ data->hdr_address = __psp_pa(hdr);
+ data->hdr_len = params.hdr_len;
+
+ ret = -ENOMEM;
+ trans_data = kmalloc(params.trans_len, GFP_KERNEL);
+ if (!trans_data)
+ goto e_free;
+
+ data->trans_address = __psp_pa(trans_data);
+ data->trans_len = params.trans_len;
+
+ /* Pin guest memory */
+ ret = -EFAULT;
+ guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
+ PAGE_SIZE, &n, 0);
+ if (!guest_page)
+ goto e_free;
+
+ /* The SEND_UPDATE_DATA command requires the C-bit to always be set. */
+ data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
+ data->guest_address |= sev_me_mask;
+ data->guest_len = params.guest_len;
+
+cmd:
+ data->handle = sev->handle;
+ ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, data, &argp->error);
+
+ /* userspace queried the header/trans lengths; return the sizes reported by the FW */
+ if (!params.trans_len || !params.hdr_len) {
+ params.hdr_len = data->hdr_len;
+ params.trans_len = data->trans_len;
+ goto done;
+ }
+
+ if (ret)
+ goto e_unpin;
+
+ /* copy transport buffer to user space */
+ if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
+ trans_data, params.trans_len)) {
+ ret = -EFAULT;
+ goto e_unpin;
+ }
+
+ /* copy packet header to userspace */
+ if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, params.hdr_len))
+ ret = -EFAULT;
+
+e_unpin:
+ sev_unpin_memory(kvm, guest_page, n);
+done:
+ if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
+ sizeof(struct kvm_sev_send_update_data)))
+ ret = -EFAULT;
+e_free:
+ kfree(data);
+ kfree(trans_data);
+ kfree(hdr);
+
+ return ret;
+}
+
static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;
@@ -7106,6 +7215,9 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
case KVM_SEV_SEND_START:
r = sev_send_start(kvm, &sev_cmd);
break;
+ case KVM_SEV_SEND_UPDATE_DATA:
+ r = sev_send_update_data(kvm, &sev_cmd);
+ break;
default:
r = -EINVAL;
goto out;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 4e9e7a5b2066..4cb6c3774ec2 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1543,6 +1543,15 @@ struct kvm_sev_send_start {
__u32 session_len;
};
+struct kvm_sev_send_update_data {
+ __u64 hdr_uaddr;
+ __u32 hdr_len;
+ __u64 guest_uaddr;
+ __u32 guest_len;
+ __u64 trans_uaddr;
+ __u32 trans_len;
+};
+
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
#define KVM_DEV_ASSIGN_MASK_INTX (1 << 2)
--
2.17.1