[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250324060127.2358-2-jasowang@redhat.com>
Date: Mon, 24 Mar 2025 14:01:22 +0800
From: Jason Wang <jasowang@...hat.com>
To: mst@...hat.com,
jasowang@...hat.com
Cc: xuanzhuo@...ux.alibaba.com,
eperezma@...hat.com,
virtualization@...ts.linux.dev,
linux-kernel@...r.kernel.org
Subject: [PATCH 14/19] virtio_ring: determine descriptor flags at one time
Let's determine the last descriptor by counting the number of sgs. This
is consistent with the packed virtqueue implementation and eases the
future in-order implementation.
Signed-off-by: Jason Wang <jasowang@...hat.com>
---
drivers/virtio/virtio_ring.c | 25 +++++++++++++------------
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ce1dc90ee89d..31aa4a935c27 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -561,7 +561,7 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
struct vring_desc_extra *extra;
struct scatterlist *sg;
struct vring_desc *desc;
- unsigned int i, n, avail, descs_used, prev, err_idx;
+ unsigned int i, n, c, avail, descs_used, err_idx;
int head;
bool indirect;
@@ -617,46 +617,47 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
return -ENOSPC;
}
+ c = 0;
for (n = 0; n < out_sgs; n++) {
+ sg = sgs[n];
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
dma_addr_t addr;
u32 len;
+ u16 flags = 0;
if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped))
goto unmap_release;
- prev = i;
+ if (++c != total_sg)
+ flags = VRING_DESC_F_NEXT;
+
/* Note that we trust indirect descriptor
* table since it use stream DMA mapping.
*/
i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
- VRING_DESC_F_NEXT,
+ flags,
premapped);
}
}
for (; n < (out_sgs + in_sgs); n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ u16 flags = VRING_DESC_F_WRITE;
dma_addr_t addr;
u32 len;
if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped))
goto unmap_release;
- prev = i;
+ if (++c != total_sg)
+ flags |= VRING_DESC_F_NEXT;
+
/* Note that we trust indirect descriptor
* table since it use stream DMA mapping.
*/
i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
- VRING_DESC_F_NEXT |
- VRING_DESC_F_WRITE,
- premapped);
+ flags, premapped);
}
}
- /* Last one doesn't continue. */
- desc[prev].flags &= cpu_to_virtio16(vq->vq.vdev, ~VRING_DESC_F_NEXT);
- if (!indirect && vring_need_unmap_buffer(vq, &extra[prev]))
- vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
- ~VRING_DESC_F_NEXT;
if (indirect) {
/* Now that the indirect table is filled in, map it. */
--
2.42.0
Powered by blists - more mailing lists