[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20210603004133.4079390-4-ak@linux.intel.com>
Date: Wed, 2 Jun 2021 17:41:28 -0700
From: Andi Kleen <ak@...ux.intel.com>
To: mst@...hat.com
Cc: jasowang@...hat.com, virtualization@...ts.linux-foundation.org,
hch@....de, m.szyprowski@...sung.com, robin.murphy@....com,
iommu@...ts.linux-foundation.org, x86@...nel.org,
sathyanarayanan.kuppuswamy@...ux.intel.com, jpoimboe@...hat.com,
linux-kernel@...r.kernel.org, Andi Kleen <ak@...ux.intel.com>
Subject: [PATCH v1 3/8] virtio: Harden split buffer detachment
Harden the split buffer detachment path by adding boundary checking. Note
that when this fails we may fail to unmap some swiotlb mapping, which could
result in a leak and a DoS. But that's acceptable because a malicious host
can DoS us anyway.
Signed-off-by: Andi Kleen <ak@...ux.intel.com>
---
drivers/virtio/virtio_ring.c | 25 +++++++++++++++++++++----
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index d37ff5a0ff58..1e9aa1e95e1b 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -651,12 +651,19 @@ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
return needs_kick;
}
-static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
- void **ctx)
+static int detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
+ void **ctx)
{
unsigned int i, j;
__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
+ /* We'll leak DMA mappings when this happens, but nothing
+ * can be done about that. In the worst case the host
+ * could DOS us, but it can of course do that anyways.
+ */
+ if (!inside_split_ring(vq, head))
+ return -EIO;
+
/* Clear data ptr. */
vq->split.desc_state[head].data = NULL;
@@ -666,6 +673,8 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
while (vq->split.vring.desc[i].flags & nextflag) {
vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
+ if (!inside_split_ring(vq, i))
+ return -EIO;
vq->vq.num_free++;
}
@@ -684,7 +693,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
/* Free the indirect table, if any, now that it's unmapped. */
if (!indir_desc)
- return;
+ return 0;
len = virtio32_to_cpu(vq->vq.vdev,
vq->split.vring.desc[head].len);
@@ -701,6 +710,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
} else if (ctx) {
*ctx = vq->split.desc_state[head].indir_desc;
}
+ return 0;
}
static inline bool more_used_split(const struct vring_virtqueue *vq)
@@ -717,6 +727,7 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
void *ret;
unsigned int i;
u16 last_used;
+ int err;
START_USE(vq);
@@ -751,7 +762,12 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
/* detach_buf_split clears data, so grab it now. */
ret = vq->split.desc_state[i].data;
- detach_buf_split(vq, i, ctx);
+ err = detach_buf_split(vq, i, ctx);
+ if (err) {
+ END_USE(vq);
+ return NULL;
+ }
+
vq->last_used_idx++;
/* If we expect an interrupt for the next entry, tell host
* by writing event index and flush out the write before
@@ -863,6 +879,7 @@ static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
/* detach_buf_split clears data, so grab it now. */
buf = vq->split.desc_state[i].data;
detach_buf_split(vq, i, NULL);
+ /* Don't need to check for error because nothing is returned */
vq->split.avail_idx_shadow--;
vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
vq->split.avail_idx_shadow);
--
2.25.4
Powered by blists - more mailing lists