Message-ID: <6557af7a-ca00-d9dd-c970-3e85c81d1582@redhat.com>
Date: Wed, 10 Nov 2021 15:09:57 +0100
From: Eric Auger <eauger@...hat.com>
To: Gavin Shan <gshan@...hat.com>, kvmarm@...ts.cs.columbia.edu
Cc: maz@...nel.org, linux-kernel@...r.kernel.org,
Jonathan.Cameron@...wei.com, pbonzini@...hat.com, will@...nel.org
Subject: Re: [PATCH v4 19/21] KVM: arm64: Support SDEI event cancellation
On 8/15/21 2:13 AM, Gavin Shan wrote:
> An injected SDEI event is used to send a notification to the guest.
> However, the event may no longer be needed once it has been injected.
> This introduces an API to cancel an injected SDEI event, provided it
> has not yet been delivered to the guest.
>
> This mechanism will be needed when we add support for asynchronous
> page faults.
If we are able to manage the migration of an executing SDEI event, why
can't we also manage the migration of pending SDEI events?
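For my own understanding, here is how I would expect the asynchronous
page fault code to drive this pair of calls. This is only a sketch:
KVM_SDEI_ASYNC_PF_NUM and kvm_async_pf_notify() are placeholder names
of mine, not names from this series.

	/*
	 * Sketch only: the event number and the helper are assumed
	 * names, not part of this series.
	 */
	static void kvm_async_pf_notify(struct kvm_vcpu *vcpu, bool page_ready)
	{
		if (!page_ready) {
			/* Queue the event for delivery on the next guest entry */
			kvm_sdei_inject(vcpu, KVM_SDEI_ASYNC_PF_NUM, false);
			return;
		}

		/*
		 * The page was resolved before the guest took the event, so
		 * try to withdraw it. -EINPROGRESS means delivery has already
		 * happened and the guest will complete the handler normally.
		 */
		kvm_sdei_cancel(vcpu, KVM_SDEI_ASYNC_PF_NUM);
	}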
Eric
>
> Signed-off-by: Gavin Shan <gshan@...hat.com>
> ---
>  arch/arm64/include/asm/kvm_sdei.h |  1 +
>  arch/arm64/kvm/sdei.c             | 49 +++++++++++++++++++++++++++++++
>  2 files changed, 50 insertions(+)
>
> diff --git a/arch/arm64/include/asm/kvm_sdei.h b/arch/arm64/include/asm/kvm_sdei.h
> index 51087fe971ba..353744c7bad9 100644
> --- a/arch/arm64/include/asm/kvm_sdei.h
> +++ b/arch/arm64/include/asm/kvm_sdei.h
> @@ -126,6 +126,7 @@ int kvm_sdei_register_notifier(struct kvm *kvm, unsigned long num,
>  			       kvm_sdei_notifier notifier);
>  int kvm_sdei_inject(struct kvm_vcpu *vcpu,
>  		    unsigned long num, bool immediate);
> +int kvm_sdei_cancel(struct kvm_vcpu *vcpu, unsigned long num);
>  void kvm_sdei_deliver(struct kvm_vcpu *vcpu);
>  long kvm_sdei_vm_ioctl(struct kvm *kvm, unsigned long arg);
>  long kvm_sdei_vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long arg);
> diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c
> index 7c2789cd1421..4f5a582daa97 100644
> --- a/arch/arm64/kvm/sdei.c
> +++ b/arch/arm64/kvm/sdei.c
> @@ -907,6 +907,55 @@ int kvm_sdei_inject(struct kvm_vcpu *vcpu,
>  	return ret;
>  }
>  
> +int kvm_sdei_cancel(struct kvm_vcpu *vcpu, unsigned long num)
> +{
> +	struct kvm *kvm = vcpu->kvm;
> +	struct kvm_sdei_kvm *ksdei = kvm->arch.sdei;
> +	struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
> +	struct kvm_sdei_kvm_event *kske = NULL;
> +	struct kvm_sdei_vcpu_event *ksve = NULL;
> +	int ret = 0;
> +
> +	if (!(ksdei && vsdei)) {
> +		ret = -EPERM;
> +		goto out;
> +	}
> +
> +	/* Find the vCPU event */
> +	spin_lock(&vsdei->lock);
> +	ksve = kvm_sdei_find_vcpu_event(vcpu, num);
> +	if (!ksve) {
> +		ret = -EINVAL;
> +		goto unlock;
> +	}
> +
> +	/* Event can't be cancelled if it has been delivered */
> +	if (ksve->state.refcount <= 1 &&
> +	    (vsdei->critical_event == ksve ||
> +	     vsdei->normal_event == ksve)) {
> +		ret = -EINPROGRESS;
> +		goto unlock;
> +	}
> +
> +	/* Free the vCPU event if necessary */
> +	kske = ksve->kske;
> +	ksve->state.refcount--;
> +	if (!ksve->state.refcount) {
> +		list_del(&ksve->link);
> +		kfree(ksve);
> +	}
> +
> +unlock:
> +	spin_unlock(&vsdei->lock);
> +	if (kske) {
> +		spin_lock(&ksdei->lock);
> +		kske->state.refcount--;
> +		spin_unlock(&ksdei->lock);
> +	}
> +out:
> +	return ret;
> +}
> +
>  void kvm_sdei_deliver(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm *kvm = vcpu->kvm;
> 
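One detail worth calling out: the cancel path never holds the
vCPU-scope and VM-scope locks at the same time, so there is no lock
ordering between vsdei->lock and ksdei->lock to get wrong. Paraphrasing
the patch (identifiers as above):

	spin_lock(&vsdei->lock);
	/* ... find the vCPU event, drop its refcount, maybe free it ... */
	spin_unlock(&vsdei->lock);

	if (kske) {			/* only if a vCPU event was found */
		spin_lock(&ksdei->lock);
		kske->state.refcount--;	/* drop the VM-scope reference */
		spin_unlock(&ksdei->lock);
	}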