Message-ID: <20240605115619.2965224-4-sashal@kernel.org>
Date: Wed, 5 Jun 2024 07:56:16 -0400
From: Sasha Levin <sashal@...nel.org>
To: linux-kernel@...r.kernel.org,
stable@...r.kernel.org
Cc: Mike Christie <michael.christie@...cle.com>,
"Michael S . Tsirkin" <mst@...hat.com>,
Sasha Levin <sashal@...nel.org>,
jasowang@...hat.com,
virtualization@...ts.linux.dev,
kvm@...r.kernel.org,
netdev@...r.kernel.org
Subject: [PATCH AUTOSEL 6.6 4/4] vhost-scsi: Handle vhost_vq_work_queue failures for events
From: Mike Christie <michael.christie@...cle.com>
[ Upstream commit b1b2ce58ed23c5d56e0ab299a5271ac01f95b75c ]
Currently, we can try to queue an event's work before the vhost_task is
created. When this happens we just drop it in vhost_scsi_do_plug before
even calling vhost_vq_work_queue. During a device shutdown we do the
same thing after vhost_scsi_clear_endpoint has cleared the backends.

In the next patches we will be able to kill the vhost_task before we
have cleared the endpoint. In that case, vhost_vq_work_queue can fail
and we will leak the event's memory. Handle the failure by just freeing
the event. This is safe to do, because vhost_vq_work_queue will only
return failure for us when the vhost_task is killed, and so userspace
would not be able to handle the events even if we did send them.
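
For readers unfamiliar with the idiom, below is a minimal userspace
sketch of the queue-or-free pattern this patch applies. The names
work_queue() and send_event() are illustrative stand-ins, not the
kernel's vhost API: if handing an item to a worker fails because the
worker is gone, the producer must reclaim the item itself or it leaks.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	int id;
};

/*
 * Stand-in for vhost_vq_work_queue(): returns false when the worker
 * task has been killed, meaning the queued item would never be
 * consumed (and thus never freed) by the worker.
 */
static bool work_queue(struct event *evt, bool worker_alive)
{
	(void)evt;		/* a real implementation would enqueue it */
	return worker_alive;
}

static void send_event(struct event *evt, bool worker_alive)
{
	if (!work_queue(evt, worker_alive)) {
		/* Worker is dead: nobody else will free this for us. */
		printf("dropping event %d\n", evt->id);
		free(evt);
	}
}

int main(void)
{
	struct event *evt = malloc(sizeof(*evt));

	evt->id = 1;
	send_event(evt, false);	/* simulates a killed vhost_task */
	return 0;
}
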
Signed-off-by: Mike Christie <michael.christie@...cle.com>
Message-Id: <20240316004707.45557-2-michael.christie@...cle.com>
Signed-off-by: Michael S. Tsirkin <mst@...hat.com>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
drivers/vhost/scsi.c | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index abef0619c7901..8f17d29ab7e9c 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -497,10 +497,8 @@ vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
 }
 
-static void vhost_scsi_evt_work(struct vhost_work *work)
+static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
 {
-	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
-					     vs_event_work);
 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	struct vhost_scsi_evt *evt, *t;
 	struct llist_node *llnode;
@@ -508,12 +506,20 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
 	mutex_lock(&vq->mutex);
 	llnode = llist_del_all(&vs->vs_event_list);
 	llist_for_each_entry_safe(evt, t, llnode, list) {
-		vhost_scsi_do_evt_work(vs, evt);
+		if (!drop)
+			vhost_scsi_do_evt_work(vs, evt);
 		vhost_scsi_free_evt(vs, evt);
 	}
 	mutex_unlock(&vq->mutex);
 }
 
+static void vhost_scsi_evt_work(struct vhost_work *work)
+{
+	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+					     vs_event_work);
+	vhost_scsi_complete_events(vs, false);
+}
+
 static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
 {
 	struct iov_iter *iter = &cmd->saved_iter;
@@ -1509,7 +1515,8 @@ vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
 	}
 
 	llist_add(&evt->list, &vs->vs_event_list);
-	vhost_vq_work_queue(vq, &vs->vs_event_work);
+	if (!vhost_vq_work_queue(vq, &vs->vs_event_work))
+		vhost_scsi_complete_events(vs, true);
 }
 
 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
--
2.43.0