Message-Id: <20221224085304.1629042-1-pbonzini@redhat.com>
Date: Sat, 24 Dec 2022 03:53:04 -0500
From: Paolo Bonzini <pbonzini@...hat.com>
To: linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc: paul@....org, seanjc@...gle.com, dwmw2@...radead.org,
Michal Luczaj <mhal@...x.co>
Subject: [PATCH] KVM: x86/xen: Fix SRCU/RCU usage in readers of evtchn_ports

evtchnfd must be protected by either kvm->lock or SRCU. Use
the former in kvm_xen_eventfd_update(), since the lock is being
taken there anyway; kvm_xen_hcall_evtchn_send(), on the other
hand, is a reader and does not need kvm->lock, so extend the
SRCU critical section there instead.

It is also important to use rcu_read_{lock,unlock}() around the
idr_find() in kvm_xen_hcall_evtchn_send(), because idr_remove()
will *not* use synchronize_srcu() to wait for readers to complete.

Co-developed-by: Michal Luczaj <mhal@...x.co>
Signed-off-by: Michal Luczaj <mhal@...x.co>
Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
---
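Not strictly part of the patch, just to illustrate the locking rules from
the commit message: the sketch below uses hypothetical names (port_idr,
port_lock, port_srcu, struct port, port_destroy(), port_lookup()), not the
real KVM code. The writer serializes on a mutex and calls synchronize_srcu()
before freeing, so an object found while inside an SRCU read-side critical
section stays valid until srcu_read_unlock(); the idr_find() itself only
needs rcu_read_lock(), because the idr's internal nodes are freed via RCU
rather than SRCU.

#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

static DEFINE_IDR(port_idr);
static DEFINE_MUTEX(port_lock);
DEFINE_STATIC_SRCU(port_srcu);

struct port {
	int id;
};

/* Writer: remove under the mutex, then wait out SRCU readers before freeing. */
static void port_destroy(int id)
{
	struct port *p;

	mutex_lock(&port_lock);
	p = idr_remove(&port_idr, id);
	mutex_unlock(&port_lock);

	if (!p)
		return;

	synchronize_srcu(&port_srcu);	/* no reader can still hold p */
	kfree(p);
}

/* Reader: SRCU keeps the object alive, plain RCU covers the idr walk. */
static int port_lookup(int id)
{
	struct port *p;
	int idx, ret;

	idx = srcu_read_lock(&port_srcu);

	rcu_read_lock();
	p = idr_find(&port_idr, id);
	rcu_read_unlock();

	/* p, if found, stays valid until srcu_read_unlock() below. */
	ret = p ? p->id : -ENOENT;

	srcu_read_unlock(&port_srcu, idx);
	return ret;
}

This mirrors kvm_xen_hcall_evtchn_send() after the patch: the SRCU read-side
section now spans every use of evtchnfd, while rcu_read_lock() and
rcu_read_unlock() bracket only the idr_find().
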
 arch/x86/kvm/xen.c | 36 +++++++++++++++++++++++++-----------
 1 file changed, 25 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index d7af40240248..935f845d005c 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -1825,20 +1825,23 @@ static int kvm_xen_eventfd_update(struct kvm *kvm,
 {
 	u32 port = data->u.evtchn.send_port;
 	struct evtchnfd *evtchnfd;
+	int ret;
 
 	if (!port || port >= max_evtchn_port(kvm))
 		return -EINVAL;
 
+	/* Protect writes to evtchnfd as well as the idr lookup. */
 	mutex_lock(&kvm->lock);
 	evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
-	mutex_unlock(&kvm->lock);
 
+	ret = -ENOENT;
 	if (!evtchnfd)
-		return -ENOENT;
+		goto out_unlock;
 
 	/* For an UPDATE, nothing may change except the priority/vcpu */
+	ret = -EINVAL;
 	if (evtchnfd->type != data->u.evtchn.type)
-		return -EINVAL;
+		goto out_unlock;
 
 	/*
 	 * Port cannot change, and if it's zero that was an eventfd
@@ -1846,20 +1849,21 @@ static int kvm_xen_eventfd_update(struct kvm *kvm,
 	 */
 	if (!evtchnfd->deliver.port.port ||
 	    evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
-		return -EINVAL;
+		goto out_unlock;
 
 	/* We only support 2 level event channels for now */
 	if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
-		return -EINVAL;
+		goto out_unlock;
 
-	mutex_lock(&kvm->lock);
 	evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
 	if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
 		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
 		evtchnfd->deliver.port.vcpu_idx = -1;
 	}
+	ret = 0;
+out_unlock:
 	mutex_unlock(&kvm->lock);
-	return 0;
+	return ret;
 }
 
 /*
@@ -2005,19 +2009,23 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
 	gpa_t gpa;
 	int idx;
 
+	/*
+	 * evtchnfd is protected by kvm->srcu; the idr lookup instead
+	 * is protected by RCU.
+	 */
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 	if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &send, sizeof(send))) {
 		*r = -EFAULT;
-		return true;
+		goto out_handled;
 	}
 
-	/* The evtchn_ports idr is protected by vcpu->kvm->srcu */
+	rcu_read_lock();
 	evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
+	rcu_read_unlock();
 	if (!evtchnfd)
-		return false;
+		goto out_not_handled;
 
 	if (evtchnfd->deliver.port.port) {
 		int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);
@@ -2028,7 +2036,13 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
 	}
 
 	*r = 0;
+out_handled:
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	return true;
+
+out_not_handled:
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	return false;
 }
 
 void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
--
2.31.1