Message-ID: <CABpDEukAEGwb9w12enO=fhSbHbchypsOdO2dkR4Jei3wDW6NWg@mail.gmail.com>
Date: Wed, 17 Aug 2022 20:47:33 -0700
From: Alper Gun <alpergun@...gle.com>
To: Ashish Kalra <Ashish.Kalra@....com>
Cc: x86@...nel.org, linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
linux-coco@...ts.linux.dev, linux-mm@...ck.org,
linux-crypto@...r.kernel.org, tglx@...utronix.de, mingo@...hat.com,
jroedel@...e.de, thomas.lendacky@....com, hpa@...or.com,
ardb@...nel.org, pbonzini@...hat.com, seanjc@...gle.com,
vkuznets@...hat.com, jmattson@...gle.com, luto@...nel.org,
dave.hansen@...ux.intel.com, slp@...hat.com, pgonda@...gle.com,
peterz@...radead.org, srinivas.pandruvada@...ux.intel.com,
rientjes@...gle.com, dovmurik@...ux.ibm.com, tobin@....com,
bp@...en8.de, michael.roth@....com, vbabka@...e.cz,
kirill@...temov.name, ak@...ux.intel.com, tony.luck@...el.com,
marcorr@...gle.com, sathyanarayanan.kuppuswamy@...ux.intel.com,
dgilbert@...hat.com, jarkko@...nel.org
Subject: Re: [PATCH Part2 v6 39/49] KVM: SVM: Introduce ops for the post gfn
map and unmap
On Mon, Jun 20, 2022 at 4:12 PM Ashish Kalra <Ashish.Kalra@....com> wrote:
>
> From: Brijesh Singh <brijesh.singh@....com>
>
> When SEV-SNP is enabled in the guest VM, the guest memory pages can
> be either private or shared. A write from the hypervisor goes through
> the RMP checks. If the hardware sees that the hypervisor is attempting
> to write to a guest private page, it triggers an RMP violation #PF.
>
> To avoid the RMP violation with GHCB pages, add new post_{map,unmap}_gfn
> functions to verify whether it is safe to map GHCB pages. Use a spinlock
> to protect against page state changes for already mapped pages.
>
> Add generic post_{map,unmap}_gfn() ops that can be used to verify that
> it is safe to map a given guest page in the hypervisor.
>
> This patch will need to be revisited later after consensus is reached on
> how to manage guest private memory, as UPM private memslots will probably
> be able to handle this page state change more gracefully.
>
> Signed-off-by: Brijesh Singh <brijesh.singh@....com>
> Signed-off-by: Ashish Kalra <ashish.kalra@....com>
> ---
> arch/x86/include/asm/kvm-x86-ops.h | 1 +
> arch/x86/include/asm/kvm_host.h | 3 ++
> arch/x86/kvm/svm/sev.c | 48 ++++++++++++++++++++++++++++--
> arch/x86/kvm/svm/svm.c | 3 ++
> arch/x86/kvm/svm/svm.h | 11 +++++++
> 5 files changed, 64 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> index e0068e702692..2dd2bc0cf4c3 100644
> --- a/arch/x86/include/asm/kvm-x86-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-ops.h
> @@ -130,6 +130,7 @@ KVM_X86_OP(vcpu_deliver_sipi_vector)
> KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
> KVM_X86_OP(alloc_apic_backing_page)
> KVM_X86_OP_OPTIONAL(rmp_page_level_adjust)
> +KVM_X86_OP(update_protected_guest_state)
>
> #undef KVM_X86_OP
> #undef KVM_X86_OP_OPTIONAL
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 49b217dc8d7e..8abc0e724f5c 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1522,7 +1522,10 @@ struct kvm_x86_ops {
> unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
>
> void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu);
> +
> void (*rmp_page_level_adjust)(struct kvm *kvm, kvm_pfn_t pfn, int *level);
> +
> + int (*update_protected_guest_state)(struct kvm_vcpu *vcpu);
> };
>
> struct kvm_x86_nested_ops {
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index cb2d1bbb862b..4ed90331bca0 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -341,6 +341,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
> if (ret)
> goto e_free;
>
> + spin_lock_init(&sev->psc_lock);
> ret = sev_snp_init(&argp->error);
> } else {
> ret = sev_platform_init(&argp->error);
> @@ -2828,19 +2829,28 @@ static inline int svm_map_ghcb(struct vcpu_svm *svm, struct kvm_host_map *map)
> {
> struct vmcb_control_area *control = &svm->vmcb->control;
> u64 gfn = gpa_to_gfn(control->ghcb_gpa);
> + struct kvm_vcpu *vcpu = &svm->vcpu;
>
> - if (kvm_vcpu_map(&svm->vcpu, gfn, map)) {
> + if (kvm_vcpu_map(vcpu, gfn, map)) {
> /* Unable to map GHCB from guest */
> pr_err("error mapping GHCB GFN [%#llx] from guest\n", gfn);
> return -EFAULT;
> }
>
> + if (sev_post_map_gfn(vcpu->kvm, map->gfn, map->pfn)) {
> + kvm_vcpu_unmap(vcpu, map, false);
> + return -EBUSY;
> + }
> +
> return 0;
> }
>
> static inline void svm_unmap_ghcb(struct vcpu_svm *svm, struct kvm_host_map *map)
> {
> - kvm_vcpu_unmap(&svm->vcpu, map, true);
> + struct kvm_vcpu *vcpu = &svm->vcpu;
> +
> + kvm_vcpu_unmap(vcpu, map, true);
> + sev_post_unmap_gfn(vcpu->kvm, map->gfn, map->pfn);
> }
>
> static void dump_ghcb(struct vcpu_svm *svm)
> @@ -3383,6 +3393,8 @@ static int __snp_handle_page_state_change(struct kvm_vcpu *vcpu, enum psc_op op,
> return PSC_UNDEF_ERR;
> }
>
> + spin_lock(&sev->psc_lock);
> +
> write_lock(&kvm->mmu_lock);
>
> rc = kvm_mmu_get_tdp_walk(vcpu, gpa, &pfn, &npt_level);
> @@ -3417,6 +3429,8 @@ static int __snp_handle_page_state_change(struct kvm_vcpu *vcpu, enum psc_op op,
>
> write_unlock(&kvm->mmu_lock);
>
> + spin_unlock(&sev->psc_lock);
There is a corner case where psc_lock is not released: if
kvm_mmu_get_tdp_walk() fails, the function returns with the lock still
held, which will cause a soft lockup.
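
Something like this would drop both locks before returning (untested
sketch; it assumes kvm_mmu_get_tdp_walk() returns false on failure and
that the failure path bails out with PSC_UNDEF_ERR like the earlier
error paths in this function):

	rc = kvm_mmu_get_tdp_walk(vcpu, gpa, &pfn, &npt_level);
	if (!rc) {
		write_unlock(&kvm->mmu_lock);
		/* also release psc_lock before bailing out */
		spin_unlock(&sev->psc_lock);
		return PSC_UNDEF_ERR;
	}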
> +
> if (rc) {
> pr_err_ratelimited("Error op %d gpa %llx pfn %llx level %d rc %d\n",
> op, gpa, pfn, level, rc);
> @@ -3965,3 +3979,33 @@ void sev_rmp_page_level_adjust(struct kvm *kvm, kvm_pfn_t pfn, int *level)
> /* Adjust the level to keep the NPT and RMP in sync */
> *level = min_t(size_t, *level, rmp_level);
> }
> +
> +int sev_post_map_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn)
> +{
> + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> + int level;
> +
> + if (!sev_snp_guest(kvm))
> + return 0;
> +
> + spin_lock(&sev->psc_lock);
> +
> +       /* Fail if the pfn is still mapped as private in the RMP table */
> + if (snp_lookup_rmpentry(pfn, &level) == 1) {
> + spin_unlock(&sev->psc_lock);
> + pr_err_ratelimited("failed to map private gfn 0x%llx pfn 0x%llx\n", gfn, pfn);
> + return -EBUSY;
> + }
> +
> + return 0;
> +}
> +
> +void sev_post_unmap_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn)
> +{
> + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> +
> + if (!sev_snp_guest(kvm))
> + return;
> +
> + spin_unlock(&sev->psc_lock);
> +}
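
One note for other readers: the locking introduced here is deliberately
asymmetric -- sev_post_map_gfn() returns with psc_lock held on success,
and sev_post_unmap_gfn() is what finally drops it, so the lock covers
the entire window in which the hypervisor has the GHCB mapped. A minimal
caller sketch (names taken from this patch; the GHCB access in the
middle is only illustrative):

	struct kvm_host_map map;

	/* takes sev->psc_lock via sev_post_map_gfn() on SNP guests */
	if (svm_map_ghcb(svm, &map))
		return -EFAULT;

	/* ... read/write the mapped GHCB page ... */

	/* drops sev->psc_lock via sev_post_unmap_gfn() */
	svm_unmap_ghcb(svm, &map);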
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index b24e0171cbf2..1c8e035ba011 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -4734,7 +4734,10 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
> .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
>
> .alloc_apic_backing_page = svm_alloc_apic_backing_page,
> +
> .rmp_page_level_adjust = sev_rmp_page_level_adjust,
> +
> + .update_protected_guest_state = sev_snp_update_protected_guest_state,
> };
>
> /*
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 54ff56cb6125..3fd95193ed8d 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -79,19 +79,25 @@ struct kvm_sev_info {
> bool active; /* SEV enabled guest */
> bool es_active; /* SEV-ES enabled guest */
> bool snp_active; /* SEV-SNP enabled guest */
> +
> unsigned int asid; /* ASID used for this guest */
> unsigned int handle; /* SEV firmware handle */
> int fd; /* SEV device fd */
> +
> unsigned long pages_locked; /* Number of pages locked */
> struct list_head regions_list; /* List of registered regions */
> +
> u64 ap_jump_table; /* SEV-ES AP Jump Table address */
> +
> struct kvm *enc_context_owner; /* Owner of copied encryption context */
> struct list_head mirror_vms; /* List of VMs mirroring */
> struct list_head mirror_entry; /* Use as a list entry of mirrors */
> struct misc_cg *misc_cg; /* For misc cgroup accounting */
> atomic_t migration_in_progress;
> +
> u64 snp_init_flags;
> void *snp_context; /* SNP guest context page */
> + spinlock_t psc_lock;
> };
>
> struct kvm_svm {
> @@ -702,6 +708,11 @@ void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
> void sev_es_unmap_ghcb(struct vcpu_svm *svm);
> struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
> void sev_rmp_page_level_adjust(struct kvm *kvm, kvm_pfn_t pfn, int *level);
> +int sev_post_map_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn);
> +void sev_post_unmap_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn);
> +void handle_rmp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
> +void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
> +int sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu);
>
> /* vmenter.S */
>
> --
> 2.25.1
>