Message-Id: <20220322080710.51727-11-gshan@redhat.com>
Date: Tue, 22 Mar 2022 16:06:58 +0800
From: Gavin Shan <gshan@...hat.com>
To: kvmarm@...ts.cs.columbia.edu
Cc: linux-kernel@...r.kernel.org, eauger@...hat.com,
shannon.zhaosl@...il.com, maz@...nel.org,
Jonathan.Cameron@...wei.com, will@...nel.org, pbonzini@...hat.com,
james.morse@....com, mark.rutland@....com, drjones@...hat.com,
vkuznets@...hat.com, shan.gavin@...il.com
Subject: [PATCH v5 10/22] KVM: arm64: Support SDEI_EVENT_ROUTING_SET hypercall

This supports the SDEI_EVENT_ROUTING_SET hypercall. It's used by the
guest to set the routing mode and affinity for a shared event that has
been registered. Requests targeting private events are disallowed, and
the routing can't be changed while the event is enabled or while the
corresponding vCPU events exist.

Signed-off-by: Gavin Shan <gshan@...hat.com>
---
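For illustration only (not part of the patch): a guest could issue the
hypercall roughly as sketched below, assuming the SMCCC conduit has
already been probed and the event has been registered but not yet
enabled. The in-kernel SDEI client (drivers/firmware/arm_sdei.c) has
its own invocation wrapper; this sketch just uses the generic
arm_smccc_1_1_invoke() helper with the constants from
include/uapi/linux/arm_sdei.h, and the function name is made up.

#include <linux/arm-smccc.h>
#include <linux/arm_sdei.h>

static int example_sdei_routing_set(unsigned long event_num)
{
	struct arm_smccc_res res;

	/*
	 * x1: event number, x2: routing mode, x3: affinity.
	 * The affinity is ignored for RM_ANY, so pass 0 here.
	 */
	arm_smccc_1_1_invoke(SDEI_1_0_FN_SDEI_EVENT_ROUTING_SET,
			     event_num, SDEI_EVENT_REGISTER_RM_ANY,
			     0, &res);

	return res.a0 == SDEI_SUCCESS ? 0 : (int)res.a0;
}
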
arch/arm64/kvm/sdei.c | 62 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 62 insertions(+)
diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c
index 4f26e5f70bff..db82ea441eae 100644
--- a/arch/arm64/kvm/sdei.c
+++ b/arch/arm64/kvm/sdei.c
@@ -565,6 +565,66 @@ static unsigned long hypercall_info(struct kvm_vcpu *vcpu)
return ret;
}
+static unsigned long hypercall_route(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_sdei_kvm *ksdei = kvm->arch.sdei;
+ struct kvm_sdei_exposed_event *exposed_event;
+ struct kvm_sdei_registered_event *registered_event;
+ unsigned long event_num = smccc_get_arg1(vcpu);
+ unsigned long route_mode = smccc_get_arg2(vcpu);
+ unsigned long route_affinity = smccc_get_arg3(vcpu);
+ int index = 0;
+ unsigned long ret = SDEI_SUCCESS;
+
+ if (!kvm_sdei_is_supported(event_num)) {
+ ret = SDEI_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ /*
+ * FIXME: The affinity should be verified when it's supported. We
+ * accept anything for now.
+ */
+ if (route_mode != SDEI_EVENT_REGISTER_RM_ANY &&
+ route_mode != SDEI_EVENT_REGISTER_RM_PE) {
+ ret = SDEI_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ spin_lock(&ksdei->lock);
+
+ /* Check if the registered event exists */
+ registered_event = find_registered_event(kvm, event_num);
+ if (!registered_event) {
+ ret = SDEI_INVALID_PARAMETERS;
+ goto unlock;
+ }
+
+ /* Check the registered event is a shared one */
+ exposed_event = registered_event->exposed_event;
+ if (!kvm_sdei_is_shared(exposed_event->state.type)) {
+ ret = SDEI_DENIED;
+ goto unlock;
+ }
+
+ if (!kvm_sdei_is_registered(registered_event, index) ||
+ kvm_sdei_is_enabled(registered_event, index) ||
+ registered_event->vcpu_event_count > 0) {
+ ret = SDEI_DENIED;
+ goto unlock;
+ }
+
+ /* Update the registered event state */
+ registered_event->state.route_mode = route_mode;
+ registered_event->state.route_affinity = route_affinity;
+
+unlock:
+ spin_unlock(&ksdei->lock);
+out:
+ return ret;
+}
+
int kvm_sdei_hypercall(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
@@ -617,6 +677,8 @@ int kvm_sdei_hypercall(struct kvm_vcpu *vcpu)
ret = hypercall_info(vcpu);
break;
case SDEI_1_0_FN_SDEI_EVENT_ROUTING_SET:
+ ret = hypercall_route(vcpu);
+ break;
case SDEI_1_0_FN_SDEI_PE_MASK:
case SDEI_1_0_FN_SDEI_PE_UNMASK:
case SDEI_1_0_FN_SDEI_INTERRUPT_BIND:
--
2.23.0