Date:   Sun, 24 Apr 2022 10:40:34 +0800
From:   Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
To:     linux-kernel@...r.kernel.org
Cc:     "Michael S. Tsirkin" <mst@...hat.com>,
        Jason Wang <jasowang@...hat.com>,
        virtualization@...ts.linux-foundation.org
Subject: [RFC PATCH 06/16] virtio_ring: split: introduce vring_reuse_bufs_split()

This patch resubmits the buffers to the new vq in the order in which
they were originally submitted.

To recover the buffers in that order, the patch reads them back from the
avail ring. The current position of the avail ring is known from
vring.avail->idx.

First, scan backwards from idx. If a state appears more than once, the
buffer corresponding to that state has already been consumed by the
device and resubmitted, so the older, stale entries are dropped from the
ring. Then scan forwards from the position where the backward walk
ended; the buffers encountered there appear in the order in which they
were submitted.

Preserving the order of the buffers during reuse matters. For example,
with virtio-net, if the order is not preserved, TCP streams may end up
out of order.
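
As an illustration only (not part of the patch), below is a minimal
standalone sketch of this two-pass scan. It models the avail ring as a
plain array of ids; RING_SIZE, ring[], checked[] and the id values are
made up for the example, with ids 2 and 0 assumed to have been consumed
and re-added so that stale copies of them remain in the ring:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8			/* must be a power of two */

int main(void)
{
	/*
	 * Hypothetical avail ring contents: ids 2 and 0 were used by the
	 * device and re-added, so stale copies sit in slots 0 and 1.
	 */
	unsigned int ring[RING_SIZE] = { 2, 0, 1, 3, 4, 2, 0, 5 };
	unsigned int idx = RING_SIZE;	/* avail->idx: next slot to fill */
	bool checked[RING_SIZE] = { false };
	unsigned int i, pos, oidx, null, head;

	oidx = idx - 1;
	null = ring[oidx & (RING_SIZE - 1)];

	/*
	 * Pass 1: walk backwards. The first copy of an id we meet is the
	 * newest one; any older duplicate is overwritten with "null".
	 */
	for (i = 0, pos = oidx; i < RING_SIZE; ++i, --pos) {
		head = ring[pos & (RING_SIZE - 1)];
		if (checked[head])
			ring[pos & (RING_SIZE - 1)] = null;
		else
			checked[head] = true;
	}

	/*
	 * Pass 2: walk forwards from where pass 1 stopped. The surviving
	 * entries come out in their original submission order.
	 */
	for (i = 0, ++pos; i < RING_SIZE; ++i, ++pos) {
		head = ring[pos & (RING_SIZE - 1)];
		if (head == null && pos != oidx)
			continue;
		printf("resubmit id %u\n", head);
	}

	return 0;
}

Running the sketch prints ids 1 3 4 2 0 5, i.e. the still-outstanding
buffers in the order in which they were (most recently) made available,
which is the order vring_reuse_bufs_split() resubmits them in.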

Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
---
 drivers/virtio/virtio_ring.c | 65 ++++++++++++++++++++++++++++++++++++
 1 file changed, 65 insertions(+)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 167442cfdb2a..02d4ffcc0a3b 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -69,6 +69,7 @@
 struct vring_desc_state_split {
 	void *data;			/* Data for callback. */
 	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
+	bool checked;			/* Seen while scanning for reuse. */
 };
 
 struct vring_desc_state_packed {
@@ -1007,6 +1008,70 @@ static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
 	return true;
 }
 
+static void vring_reuse_bufs_split(struct vring_virtqueue *vq,
+				   struct vring_virtqueue_split *vring,
+				   void (*recycle)(struct virtqueue *vq, void *buf))
+{
+	u32 head, num, idx, oidx, i, desc_num = 0;
+	u16 null, *p;
+	int err = 0;
+	void *buf;
+
+	num = vring->vring.num;
+
+	oidx = le16_to_cpu(vring->vring.avail->idx) - 1;
+	null = vring->vring.avail->ring[oidx & (num - 1)];
+
+	/*
+	 * Scan the avail ring backwards. If a state appears more than once, it
+	 * means that the state has been used by the device and re-added to the
+	 * avail ring.
+	 */
+	for (i = 0, idx = oidx; i < num; ++i, --idx) {
+		p = &vring->vring.avail->ring[idx & (num - 1)];
+
+		head = virtio32_to_cpu(vq->vq.vdev, *p);
+
+		if (vring->desc_state[head].checked) {
+			*p = null;
+			continue;
+		}
+
+		vring->desc_state[head].checked = true;
+	}
+
+	/*
+	 * Scan the avail ring forwards: the non-null states are encountered in
+	 * the order in which they were added to the avail ring.
+	 */
+	for (i = 0, ++idx; i < num; ++i, ++idx) {
+		p = &vring->vring.avail->ring[idx & (num - 1)];
+		if (*p == null && idx != oidx)
+			continue;
+
+		head = virtio32_to_cpu(vq->vq.vdev, *p);
+
+		if (!vring->desc_state[head].data)
+			continue;
+
+		/* Once adding to the vq has failed, do not try to add any more. */
+		if (err >= 0) {
+			err = vring_copy_to_vq_split(vq, vring, head);
+			if (err >= 0) {
+				desc_num += err;
+				continue;
+			}
+		}
+
+		buf = vring->desc_state[head].data;
+		desc_num += detach_buf_from_vring_split(vring, vq, head, 0,
+							NULL);
+		recycle(&vq->vq, buf);
+	}
+
+	WARN_ON(vring->num_left != desc_num);
+}
+
 static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
-- 
2.31.0
