Message-ID: <Y8cvsS27o1BaUNPz@kernel.org>
Date: Wed, 18 Jan 2023 01:30:57 +0200
From: Jarkko Sakkinen <jarkko@...nel.org>
To: Michael Roth <michael.roth@....com>
Cc: kvm@...r.kernel.org, linux-coco@...ts.linux.dev,
linux-mm@...ck.org, linux-crypto@...r.kernel.org, x86@...nel.org,
linux-kernel@...r.kernel.org, tglx@...utronix.de, mingo@...hat.com,
jroedel@...e.de, thomas.lendacky@....com, hpa@...or.com,
ardb@...nel.org, pbonzini@...hat.com, seanjc@...gle.com,
vkuznets@...hat.com, wanpengli@...cent.com, jmattson@...gle.com,
luto@...nel.org, dave.hansen@...ux.intel.com, slp@...hat.com,
pgonda@...gle.com, peterz@...radead.org,
srinivas.pandruvada@...ux.intel.com, rientjes@...gle.com,
dovmurik@...ux.ibm.com, tobin@....com, bp@...en8.de,
vbabka@...e.cz, kirill@...temov.name, ak@...ux.intel.com,
tony.luck@...el.com, marcorr@...gle.com,
sathyanarayanan.kuppuswamy@...ux.intel.com, alpergun@...gle.com,
dgilbert@...hat.com, ashish.kalra@....com, harald@...fian.com,
Nikunj A Dadhania <nikunj@....com>
Subject: Re: [PATCH RFC v7 11/64] KVM: SEV: Support private pages in
LAUNCH_UPDATE_DATA
On Wed, Dec 14, 2022 at 01:40:03PM -0600, Michael Roth wrote:
> From: Nikunj A Dadhania <nikunj@....com>
>
> The pre-boot guest payload needs to be encrypted, and the VMM has
> already copied it into the private-fd. Add support for getting the pfn
> from the memfile fd so that the payload can be encrypted in place.
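
(Side note for readers skimming: with UPM the payload pages are resolved
through the restricted memfd instead of being GUP-pinned, and the firmware
then encrypts them in place. Roughly, the branch this adds to
sev_pin_memory() looks as follows; the names are taken from the patch
below, the condensation is mine:)

	if (kvm_is_upm_enabled(kvm))
		/* gfn -> pfn comes from the restricted memfd, nothing is pinned */
		ret = sev_get_memfile_pfn(kvm, uaddr, ulen, npages, pages);
	else
		/* legacy path: pin the user virtual address range */
		npinned = pin_user_pages_fast(uaddr, npages,
					      write ? FOLL_WRITE : 0, pages);
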
>
> Signed-off-by: Nikunj A Dadhania <nikunj@....com>
> Signed-off-by: Michael Roth <michael.roth@....com>
> ---
> arch/x86/kvm/svm/sev.c | 79 ++++++++++++++++++++++++++++++++++--------
> 1 file changed, 64 insertions(+), 15 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index a7e4e3005786..ae4920aeb281 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -107,6 +107,11 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm)
> return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
> }
>
> +static bool kvm_is_upm_enabled(struct kvm *kvm)
> +{
> + return kvm->arch.upm_mode;
> +}
> +
> /* Must be called with the sev_bitmap_lock held */
> static bool __sev_recycle_asids(int min_asid, int max_asid)
> {
> @@ -382,6 +387,38 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
> return ret;
> }
>
> +static int sev_get_memfile_pfn_handler(struct kvm *kvm, struct kvm_gfn_range *range, void *data)
> +{
> + struct kvm_memory_slot *memslot = range->slot;
> + struct page **pages = data;
> + int ret = 0, i = 0;
> + kvm_pfn_t pfn;
> + gfn_t gfn;
> +
> + for (gfn = range->start; gfn < range->end; gfn++) {
> + int order;
> +
> + ret = kvm_restricted_mem_get_pfn(memslot, gfn, &pfn, &order);
> + if (ret)
> + return ret;
> +
> + if (is_error_noslot_pfn(pfn))
> + return -EFAULT;
> +
> + pages[i++] = pfn_to_page(pfn);
> + }
> +
> + return ret;
> +}
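
One thing worth double-checking in sev_get_memfile_pfn_handler(): if
kvm_restricted_mem_get_pfn() fails, or hands back an error pfn, partway
through the range, the references already taken on the earlier pages look
like they are leaked, and 'order' is fetched but never consulted. Assuming
each successful call takes an ordinary page reference (my assumption, not
verified against the restrictedmem series), something along these lines
would balance it:

	for (gfn = range->start; gfn < range->end; gfn++) {
		int order;

		ret = kvm_restricted_mem_get_pfn(memslot, gfn, &pfn, &order);
		if (ret)
			goto err;

		if (is_error_noslot_pfn(pfn)) {
			ret = -EFAULT;
			goto err;
		}

		pages[i++] = pfn_to_page(pfn);
	}

	return 0;

err:
	/* drop the references taken on the pages already resolved */
	while (i--)
		put_page(pages[i]);
	return ret;
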
> +
> +static int sev_get_memfile_pfn(struct kvm *kvm, unsigned long addr,
> + unsigned long size, unsigned long npages,
> + struct page **pages)
> +{
> + return kvm_vm_do_hva_range_op(kvm, addr, size,
> + sev_get_memfile_pfn_handler, pages);
> +}
> +
> static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
> unsigned long ulen, unsigned long *n,
> int write)
> @@ -424,16 +461,25 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
> if (!pages)
> return ERR_PTR(-ENOMEM);
>
> - /* Pin the user virtual address. */
> - npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
> - if (npinned != npages) {
> - pr_err("SEV: Failure locking %lu pages.\n", npages);
> - ret = -ENOMEM;
> - goto err;
> + if (kvm_is_upm_enabled(kvm)) {
> + /* Get the PFN from memfile */
> + if (sev_get_memfile_pfn(kvm, uaddr, ulen, npages, pages)) {
> + pr_err("%s: ERROR: unable to find slot for uaddr %lx", __func__, uaddr);
> + ret = -ENOMEM;
> + goto err;
> + }
> + } else {
> + /* Pin the user virtual address. */
> + npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
> + if (npinned != npages) {
> + pr_err("SEV: Failure locking %lu pages.\n", npages);
> + ret = -ENOMEM;
> + goto err;
> + }
> + sev->pages_locked = locked;
> }
>
> *n = npages;
> - sev->pages_locked = locked;
>
> return pages;
>
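
Two things in this hunk: the pr_err() is missing a trailing "\n", and if
the err: label still does the upstream

	if (npinned > 0)
		unpin_user_pages(pages, npinned);

cleanup, then the UPM branch jumps to err with npinned never assigned,
i.e. an uninitialized read. The simplest fix is probably to initialize it
at declaration time:

	int npinned = 0;	/* never set on the UPM path */

(Leaving sev->pages_locked untouched on the UPM path seems right, though,
since nothing is mlock-accounted there.)
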
> @@ -514,6 +560,7 @@ static int sev_launch_update_shared_gfn_handler(struct kvm *kvm,
>
> size = (range->end - range->start) << PAGE_SHIFT;
> vaddr_end = vaddr + size;
> + WARN_ON(size < PAGE_SIZE);
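
If this size is in any way userspace-triggerable, a WARN_ON() splat is
rather heavy for it; failing the call would be kinder, e.g.:

	if (WARN_ON_ONCE(size < PAGE_SIZE))
		return -EINVAL;
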
>
> /* Lock the user memory. */
> inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
> @@ -554,13 +601,16 @@ static int sev_launch_update_shared_gfn_handler(struct kvm *kvm,
> }
>
> e_unpin:
> - /* content of memory is updated, mark pages dirty */
> - for (i = 0; i < npages; i++) {
> - set_page_dirty_lock(inpages[i]);
> - mark_page_accessed(inpages[i]);
> + if (!kvm_is_upm_enabled(kvm)) {
> + /* content of memory is updated, mark pages dirty */
> + for (i = 0; i < npages; i++) {
> + set_page_dirty_lock(inpages[i]);
> + mark_page_accessed(inpages[i]);
> + }
> + /* unlock the user pages */
> + sev_unpin_memory(kvm, inpages, npages);
> }
> - /* unlock the user pages */
> - sev_unpin_memory(kvm, inpages, npages);
> +
> return ret;
> }
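
In the UPM case I do not see anything dropping the references that
sev_pin_memory() took via sev_get_memfile_pfn(). Assuming those are plain
page references (same assumption as above), the UPM leg of e_unpin
probably wants something like:

e_unpin:
	if (kvm_is_upm_enabled(kvm)) {
		/* release the memfile-derived pages */
		for (i = 0; i < npages; i++)
			put_page(inpages[i]);
	} else {
		/* content of memory is updated, mark pages dirty */
		for (i = 0; i < npages; i++) {
			set_page_dirty_lock(inpages[i]);
			mark_page_accessed(inpages[i]);
		}
		/* unlock the user pages */
		sev_unpin_memory(kvm, inpages, npages);
	}
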
>
> @@ -609,9 +659,8 @@ static int sev_launch_update_priv_gfn_handler(struct kvm *kvm,
> goto e_ret;
> kvm_release_pfn_clean(pfn);
> }
> - kvm_vm_set_region_attr(kvm, range->start, range->end,
> - true /* priv_attr */);
>
> + kvm_vm_set_region_attr(kvm, range->start, range->end, KVM_MEMORY_ATTRIBUTE_PRIVATE);
> e_ret:
> return ret;
> }
> --
> 2.25.1
>
kvm_vm_set_region_attr() should already be fixed in:

https://lore.kernel.org/all/20221214194056.161492-11-michael.roth@amd.com/
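
i.e. with that in place the call should already take the attribute mask,
matching what this hunk rewrites it to:

	kvm_vm_set_region_attr(kvm, range->start, range->end,
			       KVM_MEMORY_ATTRIBUTE_PRIVATE);

so the fixup can simply be dropped from this patch once the series is
rebased.
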
BR, Jarkko