Message-Id: <20210507083124.43347-5-gshan@redhat.com>
Date: Fri, 7 May 2021 16:31:07 +0800
From: Gavin Shan <gshan@...hat.com>
To: kvmarm@...ts.cs.columbia.edu
Cc: linux-kernel@...r.kernel.org, maz@...nel.org, will@...nel.org,
pbonzini@...hat.com, james.morse@....com, mark.rutland@....com,
Jonathan.Cameron@...wei.com, shan.gavin@...il.com
Subject: [PATCH v3 04/21] KVM: arm64: Support SDEI_EVENT_REGISTER hypercall
This supports the SDEI_EVENT_REGISTER hypercall, which is used by the guest
to register SDEI events. An SDEI event won't be raised to the guest or a
specific vCPU until it has been registered and enabled explicitly.
Only events that have been exported by KVM can be registered. On successful
registration, the KVM SDEI event (object) is created or updated: a private
event is shared by multiple vCPUs, so the object may already exist from a
registration issued on another vCPU.
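For illustration only (not part of this patch), a guest could issue the
registration through the HVC conduit roughly as sketched below. The function
name and the caller-supplied entry/argument values are hypothetical
placeholders:

#include <linux/arm-smccc.h>
#include <linux/arm_sdei.h>

static int example_sdei_register(unsigned long event_num,
				 unsigned long handler_entry,
				 unsigned long handler_arg)
{
	struct arm_smccc_res res;

	/* x1..x5: event number, entry point, argument, routing mode, affinity */
	arm_smccc_1_1_hvc(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
			  handler_entry, handler_arg,
			  SDEI_EVENT_REGISTER_RM_ANY, 0, &res);

	return (long)res.a0 == SDEI_SUCCESS ? 0 : (long)res.a0;
}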
Signed-off-by: Gavin Shan <gshan@...hat.com>
---
arch/arm64/kvm/sdei.c | 122 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 122 insertions(+)
diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c
index aa9485f076a9..d3ea3eee154b 100644
--- a/arch/arm64/kvm/sdei.c
+++ b/arch/arm64/kvm/sdei.c
@@ -21,6 +21,20 @@ static struct kvm_sdei_event_state defined_kse[] = {
},
};
+static struct kvm_sdei_event *kvm_sdei_find_event(struct kvm *kvm,
+ unsigned long num)
+{
+ struct kvm_sdei_kvm *ksdei = kvm->arch.sdei;
+ struct kvm_sdei_event *kse;
+
+ list_for_each_entry(kse, &ksdei->events, link) {
+ if (kse->state.num == num)
+ return kse;
+ }
+
+ return NULL;
+}
+
static void kvm_sdei_remove_events(struct kvm *kvm)
{
struct kvm_sdei_kvm *ksdei = kvm->arch.sdei;
@@ -32,6 +46,20 @@ static void kvm_sdei_remove_events(struct kvm *kvm)
}
}
+static struct kvm_sdei_kvm_event *kvm_sdei_find_kvm_event(struct kvm *kvm,
+ unsigned long num)
+{
+ struct kvm_sdei_kvm *ksdei = kvm->arch.sdei;
+ struct kvm_sdei_kvm_event *kske;
+
+ list_for_each_entry(kske, &ksdei->kvm_events, link) {
+ if (kske->state.num == num)
+ return kske;
+ }
+
+ return NULL;
+}
+
static void kvm_sdei_remove_kvm_events(struct kvm *kvm,
unsigned int mask,
bool force)
@@ -86,6 +114,98 @@ static unsigned long kvm_sdei_hypercall_version(struct kvm_vcpu *vcpu)
return ret;
}
+static unsigned long kvm_sdei_hypercall_register(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_sdei_kvm *ksdei = kvm->arch.sdei;
+ struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+ struct kvm_sdei_event *kse = NULL;
+ struct kvm_sdei_kvm_event *kske = NULL;
+ unsigned long event_num = smccc_get_arg1(vcpu);
+ unsigned long event_entry = smccc_get_arg2(vcpu);
+ unsigned long event_param = smccc_get_arg3(vcpu);
+ unsigned long route_mode = smccc_get_arg4(vcpu);
+ unsigned long route_affinity = smccc_get_arg5(vcpu);
+ int index = vcpu->vcpu_idx;
+ unsigned long ret = SDEI_SUCCESS;
+
+ /* Sanity check */
+ if (!(ksdei && vsdei)) {
+ ret = SDEI_NOT_SUPPORTED;
+ goto out;
+ }
+
+ if (!kvm_sdei_is_valid_event_num(event_num)) {
+ ret = SDEI_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ if (!(route_mode == SDEI_EVENT_REGISTER_RM_ANY ||
+ route_mode == SDEI_EVENT_REGISTER_RM_PE)) {
+ ret = SDEI_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ /*
+ * The KVM event may already exist if another vCPU registered this
+ * private event. In that case it is updated instead of created.
+ */
+ spin_lock(&ksdei->lock);
+ kske = kvm_sdei_find_kvm_event(kvm, event_num);
+ if (kske) {
+ kse = kske->kse;
+ index = (kse->state.type == SDEI_EVENT_TYPE_PRIVATE) ?
+ vcpu->vcpu_idx : 0;
+
+ if (kvm_sdei_is_registered(kske, index)) {
+ ret = SDEI_DENIED;
+ goto unlock;
+ }
+
+ kske->state.route_mode = route_mode;
+ kske->state.route_affinity = route_affinity;
+ kske->state.entries[index] = event_entry;
+ kske->state.params[index] = event_param;
+ kvm_sdei_set_registered(kske, index);
+ goto unlock;
+ }
+
+ /* Check if the event number has been exported by KVM */
+ kse = kvm_sdei_find_event(kvm, event_num);
+ if (!kse) {
+ ret = SDEI_INVALID_PARAMETERS;
+ goto unlock;
+ }
+
+ /* Create KVM event */
+ kske = kzalloc(sizeof(*kske), GFP_KERNEL);
+ if (!kske) {
+ ret = SDEI_OUT_OF_RESOURCE;
+ goto unlock;
+ }
+
+ /* Initialize KVM event state */
+ index = (kse->state.type == SDEI_EVENT_TYPE_PRIVATE) ?
+ vcpu->vcpu_idx : 0;
+ kske->state.num = event_num;
+ kske->state.refcount = 0;
+ kske->state.route_mode = route_mode;
+ kske->state.route_affinity = route_affinity;
+ kske->state.entries[index] = event_entry;
+ kske->state.params[index] = event_param;
+ kvm_sdei_set_registered(kske, index);
+
+ /* Initialize KVM event */
+ kske->kse = kse;
+ kske->kvm = kvm;
+ list_add_tail(&kske->link, &ksdei->kvm_events);
+
+unlock:
+ spin_unlock(&ksdei->lock);
+out:
+ return ret;
+}
+
int kvm_sdei_hypercall(struct kvm_vcpu *vcpu)
{
u32 func = smccc_get_function(vcpu);
@@ -97,6 +217,8 @@ int kvm_sdei_hypercall(struct kvm_vcpu *vcpu)
ret = kvm_sdei_hypercall_version(vcpu);
break;
case SDEI_1_0_FN_SDEI_EVENT_REGISTER:
+ ret = kvm_sdei_hypercall_register(vcpu);
+ break;
case SDEI_1_0_FN_SDEI_EVENT_ENABLE:
case SDEI_1_0_FN_SDEI_EVENT_DISABLE:
case SDEI_1_0_FN_SDEI_EVENT_CONTEXT:
--
2.23.0