Message-ID: <75151ba8-87fe-444a-b855-0d2e21b36e05@linux.intel.com>
Date: Mon, 11 Mar 2024 13:50:13 +0800
From: Binbin Wu <binbin.wu@...ux.intel.com>
To: Michael Roth <michael.roth@....com>
Cc: kvm@...r.kernel.org, linux-coco@...ts.linux.dev, linux-mm@...ck.org,
linux-crypto@...r.kernel.org, x86@...nel.org, linux-kernel@...r.kernel.org,
tglx@...utronix.de, mingo@...hat.com, jroedel@...e.de,
thomas.lendacky@....com, hpa@...or.com, ardb@...nel.org,
pbonzini@...hat.com, seanjc@...gle.com, vkuznets@...hat.com,
jmattson@...gle.com, luto@...nel.org, dave.hansen@...ux.intel.com,
slp@...hat.com, pgonda@...gle.com, peterz@...radead.org,
srinivas.pandruvada@...ux.intel.com, rientjes@...gle.com,
dovmurik@...ux.ibm.com, tobin@....com, bp@...en8.de, vbabka@...e.cz,
kirill@...temov.name, ak@...ux.intel.com, tony.luck@...el.com,
sathyanarayanan.kuppuswamy@...ux.intel.com, alpergun@...gle.com,
jarkko@...nel.org, ashish.kalra@....com, nikunj.dadhania@....com,
pankaj.gupta@....com, liam.merwick@...cle.com, zhi.a.wang@...el.com
Subject: Re: [PATCH v11 28/35] KVM: SEV: Implement gmem hook for initializing
private pages
On 12/31/2023 1:23 AM, Michael Roth wrote:
> This will handle RMP table updates and direct map changes needed to put
> a page into a private state before mapping it into an SEV-SNP guest.
>
> Signed-off-by: Michael Roth <michael.roth@....com>
> ---
>   arch/x86/kvm/Kconfig   |  1 +
>   arch/x86/kvm/svm/sev.c | 98 ++++++++++++++++++++++++++++++++++++++++++
>   arch/x86/kvm/svm/svm.c |  2 +
>   arch/x86/kvm/svm/svm.h |  1 +
>   virt/kvm/guest_memfd.c |  4 +-
>   5 files changed, 104 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> index 4ec53d6d5773..79c002e1bb5c 100644
> --- a/arch/x86/kvm/Kconfig
> +++ b/arch/x86/kvm/Kconfig
> @@ -125,6 +125,7 @@ config KVM_AMD_SEV
>          depends on KVM_AMD && X86_64
>          depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
>          select KVM_GENERIC_PRIVATE_MEM
> +        select HAVE_KVM_GMEM_PREPARE
>          help
>            Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
>            with Encrypted State (SEV-ES) on AMD processors.
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index b2ac696c436a..91f53f4a6059 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -4154,3 +4154,101 @@ void handle_rmp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
>  out:
>          put_page(pfn_to_page(pfn));
>  }
> +
> +static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)
> +{
> +        kvm_pfn_t pfn = start;
> +
> +        while (pfn < end) {
> +                int ret, rmp_level;
> +                bool assigned;
> +
> +                ret = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
> +                if (ret) {
> +                        pr_warn_ratelimited("SEV: Failed to retrieve RMP entry: PFN 0x%llx GFN start 0x%llx GFN end 0x%llx RMP level %d error %d\n",
> +                                            pfn, start, end, rmp_level, ret);
> +                        return false;
> +                }
> +
> +                if (assigned) {
> +                        pr_debug("%s: overlap detected, PFN 0x%llx start 0x%llx end 0x%llx RMP level %d\n",
> +                                 __func__, pfn, start, end, rmp_level);
> +                        return false;
> +                }
> +
> +                pfn++;
rmp_level can be obtained from snp_lookup_rmpentry().
I think pfn could be advanced according to rmp_level to avoid unnecessary
loop iterations when the RMP entry covers a 2MB large page, right?
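Something along these lines, perhaps (just an untested sketch to
illustrate, with the pr_warn/pr_debug messages dropped for brevity; it
assumes the rmp_level reported by snp_lookup_rmpentry() is meaningful
even when the entry is not assigned, otherwise the level can't be
trusted for the skip):

static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)
{
        kvm_pfn_t pfn = start;

        while (pfn < end) {
                int ret, rmp_level;
                bool assigned;

                ret = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
                if (ret)
                        return false;

                if (assigned)
                        return false;

                /*
                 * A shared 2M RMP entry covers the whole 2M-aligned
                 * region, so skip ahead to the next 2M boundary
                 * instead of stepping one PFN at a time.
                 */
                if (rmp_level > PG_LEVEL_4K)
                        pfn = ALIGN_DOWN(pfn, PTRS_PER_PMD) + PTRS_PER_PMD;
                else
                        pfn++;
        }

        return true;
}

For a fully-shared 2M region that would cut the 512 per-PFN lookups
down to a single one.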
> +        }
> +
> +        return true;
> +}
> +
> +static u8 max_level_for_order(int order)
> +{
> +        if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
> +                return PG_LEVEL_2M;
> +
> +        return PG_LEVEL_4K;
> +}
> +
> +static bool is_large_rmp_possible(struct kvm *kvm, kvm_pfn_t pfn, int order)
> +{
> +        kvm_pfn_t pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
> +
> +        /*
> +         * If this is a large folio, and the entire 2M range containing the
> +         * PFN is currently shared, then the entire 2M-aligned range can be
> +         * set to private via a single 2M RMP entry.
> +         */
> +        if (max_level_for_order(order) > PG_LEVEL_4K &&
> +            is_pfn_range_shared(pfn_aligned, pfn_aligned + PTRS_PER_PMD))
> +                return true;
> +
> +        return false;
> +}
> +
> +int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
> +{
> +        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> +        kvm_pfn_t pfn_aligned;
> +        gfn_t gfn_aligned;
> +        int level, rc;
> +        bool assigned;
> +
> +        if (!sev_snp_guest(kvm))
> +                return 0;
> +
> +        rc = snp_lookup_rmpentry(pfn, &assigned, &level);
> +        if (rc) {
> +                pr_err_ratelimited("SEV: Failed to look up RMP entry: GFN %llx PFN %llx error %d\n",
> +                                   gfn, pfn, rc);
> +                return -ENOENT;
> +        }
> +
> +        if (assigned) {
> +                pr_debug("%s: already assigned: gfn %llx pfn %llx max_order %d level %d\n",
> +                         __func__, gfn, pfn, max_order, level);
> +                return 0;
> +        }
> +
> +        if (is_large_rmp_possible(kvm, pfn, max_order)) {
> +                level = PG_LEVEL_2M;
> +                pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
> +                gfn_aligned = ALIGN_DOWN(gfn, PTRS_PER_PMD);
> +        } else {
> +                level = PG_LEVEL_4K;
> +                pfn_aligned = pfn;
> +                gfn_aligned = gfn;
> +        }
> +
> +        rc = rmp_make_private(pfn_aligned, gfn_to_gpa(gfn_aligned), level, sev->asid, false);
> +        if (rc) {
> +                pr_err_ratelimited("SEV: Failed to update RMP entry: GFN %llx PFN %llx level %d error %d\n",
> +                                   gfn, pfn, level, rc);
> +                return -EINVAL;
> +        }
> +
> +        pr_debug("%s: updated: gfn %llx pfn %llx pfn_aligned %llx max_order %d level %d\n",
> +                 __func__, gfn, pfn, pfn_aligned, max_order, level);
> +
> +        return 0;
> +}
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 240518f8d6c7..32cef8626b57 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -5065,6 +5065,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
>          .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
>          .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
>          .alloc_apic_backing_page = svm_alloc_apic_backing_page,
> +
> +        .gmem_prepare = sev_gmem_prepare,
>  };
>
> /*
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index d953ae41c619..9ece9612dbb9 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -725,6 +725,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
>  struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
>  void handle_rmp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
>  void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
> +int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
>
> /* vmenter.S */
>
> diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
> index feec0da93d98..ddea45279fef 100644
> --- a/virt/kvm/guest_memfd.c
> +++ b/virt/kvm/guest_memfd.c
> @@ -66,8 +66,8 @@ static int kvm_gmem_prepare_folio(struct inode *inode, pgoff_t index, struct fol
>                  gfn = slot->base_gfn + index - slot->gmem.pgoff;
>                  rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, compound_order(compound_head(page)));
>                  if (rc) {
> -                        pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx, error %d.\n",
> -                                            index, rc);
> +                        pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
> +                                            index, gfn, pfn, rc);
>                          return rc;
>                  }
>          }