Message-ID: <11daeb05-b33d-01a4-e84d-40148943910f@amd.com>
Date: Mon, 24 Feb 2025 16:57:12 -0600
From: Tom Lendacky <thomas.lendacky@....com>
To: Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Naveen N Rao <naveen@...nel.org>, Kim Phillips <kim.phillips@....com>,
Alexey Kardashevskiy <aik@....com>
Subject: Re: [PATCH 09/10] KVM: SVM: Use guard(mutex) to simplify SNP vCPU
state updates
On 2/18/25 19:27, Sean Christopherson wrote:
> Use guard(mutex) in sev_snp_init_protected_guest_state() and pull in its
> lock-protected inner helper. Without an unlock trampoline (and even with
> one), there is no real need for an inner helper. Eliminating the helper
> also avoids having to fix up the open-coded "lockdep" WARN_ON().
>
> Opportunistically drop the error message if KVM can't obtain the pfn for
> the new target VMSA. The error message provides zero information that
> can't be gleaned from the fact that the vCPU is stuck.
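
As an aside for anyone unfamiliar with the pattern: guard(mutex) from
<linux/cleanup.h> takes the lock at the point of declaration and hooks
mutex_unlock() to the enclosing scope via the compiler's cleanup
attribute, so every return path drops the lock without an explicit
unlock or a goto label. A minimal sketch of the shape (using this
patch's names, not its actual code):

	static void example(struct vcpu_svm *svm)
	{
		guard(mutex)(&svm->sev_es.snp_vmsa_mutex);

		if (!svm->sev_es.snp_ap_waiting_for_reset)
			return;		/* unlocked automatically */

		/* ... work done under the lock ... */
	}				/* unlocked here as well */

scoped_guard(mutex, &lock) { ... } is the sibling form for when only a
block, rather than the remainder of the function, needs to hold the
lock.
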
Reviewed-by: Tom Lendacky <thomas.lendacky@....com>
>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
> arch/x86/kvm/svm/sev.c | 122 ++++++++++++++++++-----------------------
> 1 file changed, 53 insertions(+), 69 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 3a531232c3a1..15c324b61b24 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -3839,11 +3839,26 @@ static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc)
> BUG();
> }
>
> -static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
> +/*
> + * Invoked as part of svm_vcpu_reset() processing of an init event.
> + */
> +void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
> {
> struct vcpu_svm *svm = to_svm(vcpu);
> + struct kvm_memory_slot *slot;
> + struct page *page;
> + kvm_pfn_t pfn;
> + gfn_t gfn;
>
> - WARN_ON(!mutex_is_locked(&svm->sev_es.snp_vmsa_mutex));
> + if (!sev_snp_guest(vcpu->kvm))
> + return;
> +
> + guard(mutex)(&svm->sev_es.snp_vmsa_mutex);
> +
> + if (!svm->sev_es.snp_ap_waiting_for_reset)
> + return;
> +
> + svm->sev_es.snp_ap_waiting_for_reset = false;
>
> /* Mark the vCPU as offline and not runnable */
> vcpu->arch.pv.pv_unhalted = false;
> @@ -3858,78 +3873,47 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
> */
> vmcb_mark_all_dirty(svm->vmcb);
>
> - if (VALID_PAGE(svm->sev_es.snp_vmsa_gpa)) {
> - gfn_t gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
> - struct kvm_memory_slot *slot;
> - struct page *page;
> - kvm_pfn_t pfn;
> -
> - slot = gfn_to_memslot(vcpu->kvm, gfn);
> - if (!slot)
> - return -EINVAL;
> -
> - /*
> - * The new VMSA will be private memory guest memory, so
> - * retrieve the PFN from the gmem backend.
> - */
> - if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL))
> - return -EINVAL;
> -
> - /*
> - * From this point forward, the VMSA will always be a
> - * guest-mapped page rather than the initial one allocated
> - * by KVM in svm->sev_es.vmsa. In theory, svm->sev_es.vmsa
> - * could be free'd and cleaned up here, but that involves
> - * cleanups like wbinvd_on_all_cpus() which would ideally
> - * be handled during teardown rather than guest boot.
> - * Deferring that also allows the existing logic for SEV-ES
> - * VMSAs to be re-used with minimal SNP-specific changes.
> - */
> - svm->sev_es.snp_has_guest_vmsa = true;
> -
> - /* Use the new VMSA */
> - svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn);
> -
> - /* Mark the vCPU as runnable */
> - kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
> -
> - svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
> -
> - /*
> - * gmem pages aren't currently migratable, but if this ever
> - * changes then care should be taken to ensure
> - * svm->sev_es.vmsa is pinned through some other means.
> - */
> - kvm_release_page_clean(page);
> - }
> -
> - return 0;
> -}
> -
> -/*
> - * Invoked as part of svm_vcpu_reset() processing of an init event.
> - */
> -void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
> -{
> - struct vcpu_svm *svm = to_svm(vcpu);
> - int ret;
> -
> - if (!sev_snp_guest(vcpu->kvm))
> + if (!VALID_PAGE(svm->sev_es.snp_vmsa_gpa))
> return;
>
> - mutex_lock(&svm->sev_es.snp_vmsa_mutex);
> + gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
>
> - if (!svm->sev_es.snp_ap_waiting_for_reset)
> - goto unlock;
> -
> - svm->sev_es.snp_ap_waiting_for_reset = false;
> + slot = gfn_to_memslot(vcpu->kvm, gfn);
> + if (!slot)
> + return;
>
> - ret = __sev_snp_update_protected_guest_state(vcpu);
> - if (ret)
> - vcpu_unimpl(vcpu, "snp: AP state update on init failed\n");
> + /*
> +	 * The new VMSA will be guest private memory, so retrieve the
> + * PFN from the gmem backend.
> + */
> + if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL))
> + return;
>
> -unlock:
> - mutex_unlock(&svm->sev_es.snp_vmsa_mutex);
> + /*
> + * From this point forward, the VMSA will always be a guest-mapped page
> + * rather than the initial one allocated by KVM in svm->sev_es.vmsa. In
> +	 * theory, svm->sev_es.vmsa could be freed and cleaned up here, but
> + * that involves cleanups like wbinvd_on_all_cpus() which would ideally
> + * be handled during teardown rather than guest boot. Deferring that
> + * also allows the existing logic for SEV-ES VMSAs to be re-used with
> + * minimal SNP-specific changes.
> + */
> + svm->sev_es.snp_has_guest_vmsa = true;
> +
> + /* Use the new VMSA */
> + svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn);
> +
> + /* Mark the vCPU as runnable */
> + kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
> +
> + svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
> +
> + /*
> + * gmem pages aren't currently migratable, but if this ever changes
> + * then care should be taken to ensure svm->sev_es.vmsa is pinned
> + * through some other means.
> + */
> + kvm_release_page_clean(page);
> }
>
> static int sev_snp_ap_creation(struct vcpu_svm *svm)
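
For reference, the mechanism guard() builds on can be demonstrated
outside the kernel with the same compiler feature, GCC/Clang's
__attribute__((cleanup)). Below is a self-contained userspace analogue
using pthreads; guard_mutex and unlock_cleanup are made-up names for
illustration, and this is a simplification of what DEFINE_GUARD()/
CLASS() generate in <linux/cleanup.h>:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int counter;

	/* Run by the compiler when the annotated variable leaves scope. */
	static void unlock_cleanup(pthread_mutex_t **m)
	{
		pthread_mutex_unlock(*m);
	}

	#define guard_mutex(m)						\
		pthread_mutex_t *__guard				\
			__attribute__((cleanup(unlock_cleanup))) = (m);	\
		pthread_mutex_lock(__guard)

	static void bump(int n)
	{
		guard_mutex(&lock);	/* locked from here on */

		if (n < 0)
			return;		/* unlock runs automatically */

		counter += n;
	}				/* ... and on the normal path too */

	int main(void)
	{
		bump(3);
		bump(-1);
		bump(4);
		printf("%d\n", counter);	/* prints 7 */
		return 0;
	}

The early return in bump() is the interesting part: with the cleanup
attribute in place there is no way to leave the scope with the lock
held, which is exactly what lets the patch delete the unlock label.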