Message-Id: <20210209032733.99996-20-gshan@redhat.com>
Date: Tue, 9 Feb 2021 11:27:31 +0800
From: Gavin Shan <gshan@...hat.com>
To: kvmarm@...ts.cs.columbia.edu
Cc: linux-kernel@...r.kernel.org, pbonzini@...hat.com, maz@...nel.org,
james.morse@....com, Jonathan.Cameron@...wei.com,
mark.rutland@....com, will@...nel.org, shan.gavin@...il.com
Subject: [PATCH v2 19/21] KVM: arm64: Support SDEI event cancellation
An SDEI event is injected to send a notification to the guest. However,
the event might no longer be needed after it has been injected. This
introduces an API to cancel an injected SDEI event, provided it hasn't
been delivered to the guest yet.

This mechanism will be needed when asynchronous page fault support is
added.
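
For context, a rough usage sketch of how a future consumer (such as the
asynchronous page fault code) might pair injection with cancellation is
shown below. The event number and the apf_*() wrappers are hypothetical;
only kvm_sdei_inject() and kvm_sdei_cancel() are provided by this series:

#define APF_SDEI_EVENT_NUM	0x40200001	/* hypothetical private event number */

static int apf_notify_guest(struct kvm_vcpu *vcpu)
{
	/* Queue the event for delivery to the guest */
	return kvm_sdei_inject(vcpu, APF_SDEI_EVENT_NUM, false);
}

static void apf_cancel_notification(struct kvm_vcpu *vcpu)
{
	int ret;

	/*
	 * Drop the queued event if it hasn't been delivered yet.
	 * -EINPROGRESS means the guest is already handling it, so
	 * the handler is simply allowed to complete.
	 */
	ret = kvm_sdei_cancel(vcpu, APF_SDEI_EVENT_NUM);
	if (ret && ret != -EINPROGRESS)
		pr_warn("%s: unable to cancel event (%d)\n", __func__, ret);
}

The -EINPROGRESS case mirrors the check in kvm_sdei_cancel() below: once
the event is being handled by the guest, cancellation is refused and the
handler runs to completion.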
Signed-off-by: Gavin Shan <gshan@...hat.com>
---
arch/arm64/include/asm/kvm_sdei.h | 1 +
arch/arm64/kvm/sdei.c | 49 +++++++++++++++++++++++++++++++
2 files changed, 50 insertions(+)
diff --git a/arch/arm64/include/asm/kvm_sdei.h b/arch/arm64/include/asm/kvm_sdei.h
index 51087fe971ba..353744c7bad9 100644
--- a/arch/arm64/include/asm/kvm_sdei.h
+++ b/arch/arm64/include/asm/kvm_sdei.h
@@ -126,6 +126,7 @@ int kvm_sdei_register_notifier(struct kvm *kvm, unsigned long num,
 			       kvm_sdei_notifier notifier);
 int kvm_sdei_inject(struct kvm_vcpu *vcpu,
 		    unsigned long num, bool immediate);
+int kvm_sdei_cancel(struct kvm_vcpu *vcpu, unsigned long num);
 void kvm_sdei_deliver(struct kvm_vcpu *vcpu);
 long kvm_sdei_vm_ioctl(struct kvm *kvm, unsigned long arg);
 long kvm_sdei_vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long arg);
diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c
index 7c2789cd1421..4f5a582daa97 100644
--- a/arch/arm64/kvm/sdei.c
+++ b/arch/arm64/kvm/sdei.c
@@ -907,6 +907,55 @@ int kvm_sdei_inject(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
+int kvm_sdei_cancel(struct kvm_vcpu *vcpu, unsigned long num)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_sdei_kvm *ksdei = kvm->arch.sdei;
+	struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+	struct kvm_sdei_kvm_event *kske = NULL;
+	struct kvm_sdei_vcpu_event *ksve = NULL;
+	int ret = 0;
+
+	if (!(ksdei && vsdei)) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	/* Find the vCPU event */
+	spin_lock(&vsdei->lock);
+	ksve = kvm_sdei_find_vcpu_event(vcpu, num);
+	if (!ksve) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	/* Event can't be cancelled if it has been delivered */
+	if (ksve->state.refcount <= 1 &&
+	    (vsdei->critical_event == ksve ||
+	     vsdei->normal_event == ksve)) {
+		ret = -EINPROGRESS;
+		goto unlock;
+	}
+
+	/* Free the vCPU event if necessary */
+	kske = ksve->kske;
+	ksve->state.refcount--;
+	if (!ksve->state.refcount) {
+		list_del(&ksve->link);
+		kfree(ksve);
+	}
+
+unlock:
+	spin_unlock(&vsdei->lock);
+	if (kske) {
+		spin_lock(&ksdei->lock);
+		kske->state.refcount--;
+		spin_unlock(&ksdei->lock);
+	}
+out:
+	return ret;
+}
+
 void kvm_sdei_deliver(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
--
2.23.0