Message-ID: <1711611723.1223743-4-xuanzhuo@linux.alibaba.com>
Date: Thu, 28 Mar 2024 15:42:03 +0800
From: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
To: Jason Wang <jasowang@...hat.com>
Cc: virtualization@...ts.linux.dev,
"Michael S. Tsirkin" <mst@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
netdev@...r.kernel.org
Subject: Re: [PATCH vhost v6 05/10] virtio_ring: split: structure the indirect desc table
On Thu, 28 Mar 2024 15:01:02 +0800, Jason Wang <jasowang@...hat.com> wrote:
> On Wed, Mar 27, 2024 at 7:14 PM Xuan Zhuo <xuanzhuo@...ux.alibaba.com> wrote:
> >
> > This commit structures the indirect desc table, so that we can
> > get the desc num directly when doing unmap.
> >
> > It also saves the dma info to the new struct, so the indirect
> > case no longer uses the dma fields of desc_extra. Subsequent
> > commits will make those dma fields optional, but for the
> > indirect case we must still record the dma info.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> > ---
> > drivers/virtio/virtio_ring.c | 87 +++++++++++++++++++++---------------
> > 1 file changed, 51 insertions(+), 36 deletions(-)
> >
> > diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> > index 8170761ab25e..1f7c96543d58 100644
> > --- a/drivers/virtio/virtio_ring.c
> > +++ b/drivers/virtio/virtio_ring.c
> > @@ -69,7 +69,7 @@
> >
> > struct vring_desc_state_split {
> > void *data; /* Data for callback. */
> > - struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
> > + struct vring_desc_extra *indir_desc; /* Indirect descriptor, if any. */
> > };
> >
> > struct vring_desc_state_packed {
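
Side note on the hunk above, to make the intent clearer: since
state->indir_desc now points at a vring_desc_extra followed by the
descriptor array, the detach path can recover both the table and its
size directly from it. A rough sketch of that (not taken from this
patch; "state" is only illustrative and it assumes the existing
vring_unmap_one_split_indirect() helper):

	struct vring_desc_extra *in_extra = state->indir_desc;
	struct vring_desc *desc = (struct vring_desc *)(in_extra + 1);
	u32 i, num = in_extra->len / sizeof(struct vring_desc);

	/* unmap the in-table descriptors only if we mapped them */
	if (vring_need_unmap_buffer(vq))
		for (i = 0; i < num; i++)
			vring_unmap_one_split_indirect(vq, &desc[i]);

	kfree(in_extra);
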
> > @@ -469,12 +469,16 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> > return extra[i].next;
> > }
> >
> > -static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
> > - unsigned int total_sg,
> > - gfp_t gfp)
> > +static struct vring_desc_extra *alloc_indirect_split(struct virtqueue *_vq,
> > + unsigned int total_sg,
> > + gfp_t gfp)
> > {
> > + struct vring_desc_extra *in_extra;
> > struct vring_desc *desc;
> > unsigned int i;
> > + u32 size;
> > +
> > + size = sizeof(*in_extra) + sizeof(struct vring_desc) * total_sg;
> >
> > /*
> > * We require lowmem mappings for the descriptors because
> > @@ -483,13 +487,16 @@ static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
> > */
> > gfp &= ~__GFP_HIGHMEM;
> >
> > - desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
> > - if (!desc)
> > + in_extra = kmalloc(size, gfp);
> > + if (!in_extra)
> > return NULL;
> >
> > + desc = (struct vring_desc *)(in_extra + 1);
> > +
> > for (i = 0; i < total_sg; i++)
> > desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
> > - return desc;
> > +
> > + return in_extra;
> > }
> >
> > static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
> > @@ -531,6 +538,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> > gfp_t gfp)
> > {
> > struct vring_virtqueue *vq = to_vvq(_vq);
> > + struct vring_desc_extra *in_extra;
> > struct scatterlist *sg;
> > struct vring_desc *desc;
> > unsigned int i, n, avail, descs_used, prev, err_idx;
> > @@ -553,9 +561,13 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> >
> > head = vq->free_head;
> >
> > - if (virtqueue_use_indirect(vq, total_sg))
> > - desc = alloc_indirect_split(_vq, total_sg, gfp);
> > - else {
> > + if (virtqueue_use_indirect(vq, total_sg)) {
> > + in_extra = alloc_indirect_split(_vq, total_sg, gfp);
> > + if (!in_extra)
> > + desc = NULL;
> > + else
> > + desc = (struct vring_desc *)(in_extra + 1);
> > + } else {
> > desc = NULL;
> > WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
> > }
> > @@ -628,10 +640,10 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> > ~VRING_DESC_F_NEXT;
> >
> > if (indirect) {
> > + u32 size = total_sg * sizeof(struct vring_desc);
> > +
> > /* Now that the indirect table is filled in, map it. */
> > - dma_addr_t addr = vring_map_single(
> > - vq, desc, total_sg * sizeof(struct vring_desc),
> > - DMA_TO_DEVICE);
> > + dma_addr_t addr = vring_map_single(vq, desc, size, DMA_TO_DEVICE);
> > if (vring_mapping_error(vq, addr)) {
> > if (!vring_need_unmap_buffer(vq))
> > goto free_indirect;
> > @@ -639,11 +651,18 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> > goto unmap_release;
> > }
> >
> > - virtqueue_add_desc_split(_vq, vq->split.vring.desc,
> > - head, addr,
> > - total_sg * sizeof(struct vring_desc),
> > - VRING_DESC_F_INDIRECT,
> > - false);
> > + desc = &vq->split.vring.desc[head];
> > +
> > + desc->flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
> > + desc->addr = cpu_to_virtio64(_vq->vdev, addr);
> > + desc->len = cpu_to_virtio32(_vq->vdev, size);
> > +
> > + vq->split.desc_extra[head].flags = VRING_DESC_F_INDIRECT;
> > +
> > + if (vq->use_dma_api) {
> > + in_extra->addr = addr;
> > + in_extra->len = size;
> > + }
>
> I would find ways to reuse virtqueue_add_desc_split instead of open coding here.
I will try.
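
Something along these lines, maybe (rough sketch only; it assumes
virtqueue_add_desc_split() still does the desc_extra[head]
bookkeeping this hunk needs, which I have to double check):

		/* map the indirect table via the existing helper ... */
		virtqueue_add_desc_split(_vq, vq->split.vring.desc,
					 head, addr, size,
					 VRING_DESC_F_INDIRECT,
					 false);

		/* ... and only open-code recording the table's own mapping */
		if (vq->use_dma_api) {
			in_extra->addr = addr;
			in_extra->len = size;
		}
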
Thanks.
>
> Thanks
>