Message-ID: <9c990d23-1318-a178-01b6-6c1fbf8018dc@profian.com>
Date: Wed, 11 Jan 2023 15:04:16 +0100
From: Harald Hoyer <harald@...fian.com>
To: Tom Dohrmann <erbse.13@....de>, Michael Roth <michael.roth@....com>
Cc: kvm@...r.kernel.org, linux-coco@...ts.linux.dev,
linux-mm@...ck.org, linux-crypto@...r.kernel.org, x86@...nel.org,
linux-kernel@...r.kernel.org, tglx@...utronix.de, mingo@...hat.com,
jroedel@...e.de, thomas.lendacky@....com, hpa@...or.com,
ardb@...nel.org, pbonzini@...hat.com, seanjc@...gle.com,
vkuznets@...hat.com, wanpengli@...cent.com, jmattson@...gle.com,
luto@...nel.org, dave.hansen@...ux.intel.com, slp@...hat.com,
pgonda@...gle.com, peterz@...radead.org,
srinivas.pandruvada@...ux.intel.com, rientjes@...gle.com,
dovmurik@...ux.ibm.com, tobin@....com, bp@...en8.de,
vbabka@...e.cz, kirill@...temov.name, ak@...ux.intel.com,
tony.luck@...el.com, marcorr@...gle.com,
sathyanarayanan.kuppuswamy@...ux.intel.com, alpergun@...gle.com,
dgilbert@...hat.com, jarkko@...nel.org, ashish.kalra@....com,
Brijesh Singh <brijesh.singh@....com>
Subject: Re: [PATCH RFC v7 39/64] KVM: SVM: Add KVM_SEV_SNP_LAUNCH_UPDATE
command
On 11.01.23 at 14:56, Tom Dohrmann wrote:
> On Wed, Dec 14, 2022 at 01:40:31PM -0600, Michael Roth wrote:
>> From: Brijesh Singh <brijesh.singh@....com>
>>
>> The KVM_SEV_SNP_LAUNCH_UPDATE command can be used to insert data into the
>> guest's memory. The data is encrypted with the cryptographic context
>> created with KVM_SEV_SNP_LAUNCH_START.
>>
>> In addition to inserting data, it can insert two special pages
>> into the guest's memory: the secrets page and the CPUID page.
>>
>> When terminating the guest, reclaim the guest pages added to the RMP
>> table. If the reclaim fails, the page is no longer safe to release
>> back to the system, so leak it instead.
>>
>> For more information see the SEV-SNP specification.
>>
>> Co-developed-by: Michael Roth <michael.roth@....com>
>> Signed-off-by: Michael Roth <michael.roth@....com>
>> Signed-off-by: Brijesh Singh <brijesh.singh@....com>
>> Signed-off-by: Ashish Kalra <ashish.kalra@....com>
>> ---
>> .../virt/kvm/x86/amd-memory-encryption.rst | 29 ++++
>> arch/x86/kvm/svm/sev.c | 161 ++++++++++++++++++
>> include/uapi/linux/kvm.h | 19 +++
>> 3 files changed, 209 insertions(+)
>>
>> diff --git a/Documentation/virt/kvm/x86/amd-memory-encryption.rst b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
>> index 58971fc02a15..c94be8e6d657 100644
>> --- a/Documentation/virt/kvm/x86/amd-memory-encryption.rst
>> +++ b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
>> @@ -485,6 +485,35 @@ Returns: 0 on success, -negative on error
>>
>> See the SEV-SNP specification for further detail on the launch input.
>>
>> +20. KVM_SNP_LAUNCH_UPDATE
>> +-------------------------
>> +
>> +The KVM_SNP_LAUNCH_UPDATE command is used to encrypt a memory region. It also
>> +calculates a measurement of the memory contents. The measurement is a signature
>> +of the memory contents that can be sent to the guest owner as an attestation
>> +that the memory was encrypted correctly by the firmware.
>> +
>> +Parameters (in): struct kvm_sev_snp_launch_update
>> +
>> +Returns: 0 on success, -negative on error
>> +
>> +::
>> +
>> + struct kvm_sev_snp_launch_update {
>> + __u64 start_gfn; /* Guest page number to start from. */
>> + __u64 uaddr; /* userspace address of the data to be encrypted */
>> + __u32 len; /* length of memory region */
>> + __u8 imi_page; /* 1 if memory is part of the IMI */
>> + __u8 page_type; /* page type */
>> + __u8 vmpl3_perms; /* VMPL3 permission mask */
>> + __u8 vmpl2_perms; /* VMPL2 permission mask */
>> + __u8 vmpl1_perms; /* VMPL1 permission mask */
>> + };
>> +
>> +See the SEV-SNP spec for further details on how to build the VMPL permission
>> +mask and page type.
>> +
>> +
>> References
>> ==========
>>
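
Just to double-check the uapi documented above from the VMM side: I assume
this is driven via KVM_MEMORY_ENCRYPT_OP on the VM fd, roughly like the
sketch below. The command id KVM_SEV_SNP_LAUNCH_UPDATE and the struct layout
are taken from this patch; everything else is illustrative and untested.

/*
 * Minimal userspace sketch. Assumptions: the command id and struct come
 * from this series; vm_fd and sev_fd have been opened elsewhere.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int snp_launch_update(int vm_fd, int sev_fd, uint64_t start_gfn,
                             void *uaddr, uint32_t len, uint8_t page_type)
{
        struct kvm_sev_snp_launch_update update = {
                .start_gfn = start_gfn,
                .uaddr     = (uint64_t)(uintptr_t)uaddr, /* data to encrypt and measure */
                .len       = len,
                .page_type = page_type, /* normal, secrets or CPUID page */
        };
        struct kvm_sev_cmd cmd = {
                .id     = KVM_SEV_SNP_LAUNCH_UPDATE,
                .data   = (uint64_t)(uintptr_t)&update,
                .sev_fd = (uint32_t)sev_fd,
        };

        /* SEV/SNP launch commands are issued against the VM fd. */
        return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}
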
>> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
>> index 6d1d0e424f76..379e61a9226a 100644
>> --- a/arch/x86/kvm/svm/sev.c
>> +++ b/arch/x86/kvm/svm/sev.c
>> @@ -238,6 +238,37 @@ static void sev_decommission(unsigned int handle)
>> sev_guest_decommission(&decommission, NULL);
>> }
>>
>> +static int snp_page_reclaim(u64 pfn)
>> +{
>> + struct sev_data_snp_page_reclaim data = {0};
>> + int err, rc;
>> +
>> + data.paddr = __sme_set(pfn << PAGE_SHIFT);
>> + rc = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err);
>> + if (rc) {
>> + /*
>> + * If the reclaim failed, then page is no longer safe
>> + * to use.
>> + */
>> + snp_mark_pages_offline(pfn,
>> + page_level_size(PG_LEVEL_4K) >> PAGE_SHIFT);
>> + }
>> +
>> + return rc;
>> +}
>> +
>> +static int host_rmp_make_shared(u64 pfn, enum pg_level level, bool leak)
>> +{
>> + int rc;
>> +
>> + rc = rmp_make_shared(pfn, level);
>> + if (rc && leak)
>> + snp_mark_pages_offline(pfn,
>> + page_level_size(level) >> PAGE_SHIFT);
>> +
>> + return rc;
>> +}
>> +
>> static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
>> {
>> struct sev_data_deactivate deactivate;
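
Regarding the reclaim/leak policy in the two helpers above: my reading is
that guest teardown is expected to flip pages back to shared and only hand
them to the page allocator if that succeeded, along these lines (purely
illustrative, the function name below is made up):

/*
 * Illustrative sketch only; snp_teardown_guest_page() is not a real
 * function in this series.
 */
static void snp_teardown_guest_page(u64 pfn)
{
        /*
         * If the RMP transition back to shared fails, the helper has
         * already marked the page offline, so it must be leaked rather
         * than freed back to the allocator.
         */
        if (host_rmp_make_shared(pfn, PG_LEVEL_4K, true))
                return;

        __free_page(pfn_to_page(pfn));
}
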
>> @@ -2085,6 +2116,133 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
>> return rc;
>> }
>>
>> +static int snp_launch_update_gfn_handler(struct kvm *kvm,
>> + struct kvm_gfn_range *range,
>> + void *opaque)
>> +{
>> + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
>> + struct kvm_memory_slot *memslot = range->slot;
>> + struct sev_data_snp_launch_update data = {0};
>> + struct kvm_sev_snp_launch_update params;
>> + struct kvm_sev_cmd *argp = opaque;
>> + int *error = &argp->error;
>> + int i, n = 0, ret = 0;
>> + unsigned long npages;
>> + kvm_pfn_t *pfns;
>> + gfn_t gfn;
>> +
>> + if (!kvm_slot_can_be_private(memslot)) {
>> + pr_err("SEV-SNP requires restricted memory.\n");
>> + return -EINVAL;
>> + }
>> +
>> + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params))) {
>> + pr_err("Failed to copy user parameters for SEV-SNP launch.\n");
>> + return -EFAULT;
>> + }
>> +
>> + data.gctx_paddr = __psp_pa(sev->snp_context);
>> +
>> + npages = range->end - range->start;
>> + pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL_ACCOUNT);
>> + if (!pfns)
>> + return -ENOMEM;
>> +
>> + pr_debug("%s: GFN range 0x%llx-0x%llx, type %d\n", __func__,
>> + range->start, range->end, params.page_type);
>> +
>> + for (gfn = range->start, i = 0; gfn < range->end; gfn++, i++) {
>> + int order, level;
>> + void *kvaddr;
>> +
>> + ret = kvm_restricted_mem_get_pfn(memslot, gfn, &pfns[i], &order);
>> + if (ret)
>> + goto e_release;
>> +
>> + n++;
>> + ret = snp_lookup_rmpentry((u64)pfns[i], &level);
>> + if (ret) {
>> + pr_err("Failed to ensure GFN 0x%llx is in initial shared state, ret: %d\n",
>> + gfn, ret);
>> + return -EFAULT;
>> + }
>> +
>> + kvaddr = pfn_to_kaddr(pfns[i]);
>> + if (!virt_addr_valid(kvaddr)) {
>> + pr_err("Invalid HVA 0x%llx for GFN 0x%llx\n", (uint64_t)kvaddr, gfn);
>> + ret = -EINVAL;
>> + goto e_release;
>> + }
>> +
>> + ret = kvm_read_guest_page(kvm, gfn, kvaddr, 0, PAGE_SIZE);
>> + if (ret) {
>> + pr_err("Guest read failed, ret: 0x%x\n", ret);
>> + goto e_release;
>> + }
>> +
>> + ret = rmp_make_private(pfns[i], gfn << PAGE_SHIFT, PG_LEVEL_4K,
>> + sev_get_asid(kvm), true);
>> + if (ret) {
>> + ret = -EFAULT;
>> + goto e_release;
>> + }
>> +
>> + data.address = __sme_set(pfns[i] << PAGE_SHIFT);
>> + data.page_size = X86_TO_RMP_PG_LEVEL(PG_LEVEL_4K);
>> + data.page_type = params.page_type;
>> + data.vmpl3_perms = params.vmpl3_perms;
>> + data.vmpl2_perms = params.vmpl2_perms;
>> + data.vmpl1_perms = params.vmpl1_perms;
>> + ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE,
>> + &data, error);
>> + if (ret) {
>> + pr_err("SEV-SNP launch update failed, ret: 0x%x, fw_error: 0x%x\n",
>> + ret, *error);
>> + snp_page_reclaim(pfns[i]);
>> + goto e_release;
>
> When a launch update fails for a CPUID page with error `INVALID_PARAM`, the
> firmware writes back corrected values. We should probably write these values
> back to userspace. Before UPM was introduced, this happened automatically
> because we didn't copy the page to private memory and did the update
> completely in place.
>
Yes, pretty please!
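
Something along these lines is what I would hope for, purely as a sketch on
top of the quoted error path. SEV_RET_INVALID_PARAM comes from psp-sev.h; the
KVM_SEV_SNP_PAGE_TYPE_CPUID name, the assumption that params.uaddr tracks the
start of the GFN range, and the ordering relative to snp_page_reclaim() are
all guesses on my side:

/*
 * Sketch: if the firmware rejected a CPUID page with INVALID_PARAM, it has
 * written corrected values into the page, so flip the page back to shared
 * and copy it out to the userspace buffer before bailing out, similar to
 * what the pre-UPM in-place update gave us for free.
 */
if (ret && *error == SEV_RET_INVALID_PARAM &&
    params.page_type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
        void __user *uaddr = (void __user *)(uintptr_t)params.uaddr;

        if (!host_rmp_make_shared(pfns[i], PG_LEVEL_4K, true)) {
                if (copy_to_user(uaddr + i * PAGE_SIZE, kvaddr, PAGE_SIZE))
                        pr_err("Failed to write back corrected CPUID page\n");
        }
}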