lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CACGkMEvGTiZUepzRL9dMNaxZUenKzrqPnnd9594aWjF-KcXCrw@mail.gmail.com>
Date: Thu, 28 Mar 2024 14:56:55 +0800
From: Jason Wang <jasowang@...hat.com>
To: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
Cc: virtualization@...ts.linux.dev, "Michael S. Tsirkin" <mst@...hat.com>, 
	"David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>, 
	Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>, netdev@...r.kernel.org
Subject: Re: [PATCH vhost v6 03/10] virtio_ring: packed: structure the
 indirect desc table

On Wed, Mar 27, 2024 at 7:14 PM Xuan Zhuo <xuanzhuo@...ux.alibaba.com> wrote:
>
> This commit structures the indirect desc table.
> Then we can get the desc num directly when doing unmap.
>
> And save the dma info to the struct, then the indirect
> will not use the dma fields of the desc_extra. The subsequent
> commits will make the dma fields optional.

Nit: It's better to add something like "so we can't reuse the
desc_extra[] array"

> But for
> the indirect case, we must record the dma info.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> ---
>  drivers/virtio/virtio_ring.c | 61 +++++++++++++++++++-----------------
>  1 file changed, 33 insertions(+), 28 deletions(-)
>
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index a2838fe1cc08..e3343cf55774 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -74,7 +74,7 @@ struct vring_desc_state_split {
>
>  struct vring_desc_state_packed {
>         void *data;                     /* Data for callback. */
> -       struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
> +       struct vring_desc_extra *indir_desc; /* Indirect descriptor, if any. */

Should be "DMA info with indirect descriptor, if any" ?

>         u16 num;                        /* Descriptor list length. */
>         u16 last;                       /* The last desc state in a list. */
>  };
> @@ -1243,10 +1243,13 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
>                        DMA_FROM_DEVICE : DMA_TO_DEVICE);
>  }
>
> -static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
> -                                                      gfp_t gfp)
> +static struct vring_desc_extra *alloc_indirect_packed(unsigned int total_sg,
> +                                                     gfp_t gfp)
>  {
> -       struct vring_packed_desc *desc;
> +       struct vring_desc_extra *in_extra;
> +       u32 size;
> +
> +       size = sizeof(*in_extra) + sizeof(struct vring_packed_desc) * total_sg;
>
>         /*
>          * We require lowmem mappings for the descriptors because
> @@ -1255,9 +1258,10 @@ static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
>          */
>         gfp &= ~__GFP_HIGHMEM;
>
> -       desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
>
> -       return desc;
> +       in_extra = kmalloc(size, gfp);
> +
> +       return in_extra;
>  }
>
>  static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
> @@ -1268,6 +1272,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
>                                          void *data,
>                                          gfp_t gfp)
>  {
> +       struct vring_desc_extra *in_extra;
>         struct vring_packed_desc *desc;
>         struct scatterlist *sg;
>         unsigned int i, n, err_idx;
> @@ -1275,10 +1280,12 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
>         dma_addr_t addr;
>
>         head = vq->packed.next_avail_idx;
> -       desc = alloc_indirect_packed(total_sg, gfp);
> -       if (!desc)
> +       in_extra = alloc_indirect_packed(total_sg, gfp);
> +       if (!in_extra)
>                 return -ENOMEM;
>
> +       desc = (struct vring_packed_desc *)(in_extra + 1);
> +
>         if (unlikely(vq->vq.num_free < 1)) {
>                 pr_debug("Can't add buf len 1 - avail = 0\n");
>                 kfree(desc);
> @@ -1315,17 +1322,16 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
>                 goto unmap_release;
>         }
>
> +       if (vq->use_dma_api) {
> +               in_extra->addr = addr;
> +               in_extra->len = total_sg * sizeof(struct vring_packed_desc);
> +       }

Any reason why we don't do it after the below assignment of descriptor fields?

> +
>         vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
>         vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
>                                 sizeof(struct vring_packed_desc));
>         vq->packed.vring.desc[head].id = cpu_to_le16(id);
>
> -       if (vq->use_dma_api) {
> -               vq->packed.desc_extra[id].addr = addr;
> -               vq->packed.desc_extra[id].len = total_sg *
> -                               sizeof(struct vring_packed_desc);
> -       }
> -
>         vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
>                 vq->packed.avail_used_flags;
>
> @@ -1356,7 +1362,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
>         /* Store token and indirect buffer state. */
>         vq->packed.desc_state[id].num = 1;
>         vq->packed.desc_state[id].data = data;
> -       vq->packed.desc_state[id].indir_desc = desc;
> +       vq->packed.desc_state[id].indir_desc = in_extra;
>         vq->packed.desc_state[id].last = id;
>
>         vq->num_added += 1;
> @@ -1375,7 +1381,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
>                 vring_unmap_desc_packed(vq, &desc[i]);
>
>  free_desc:
> -       kfree(desc);
> +       kfree(in_extra);
>
>         END_USE(vq);
>         return -ENOMEM;
> @@ -1589,7 +1595,6 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
>                               unsigned int id, void **ctx)
>  {
>         struct vring_desc_state_packed *state = NULL;
> -       struct vring_packed_desc *desc;
>         unsigned int i, curr;
>         u16 flags;
>
> @@ -1616,27 +1621,27 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
>                 if (ctx)
>                         *ctx = state->indir_desc;
>         } else {
> -               const struct vring_desc_extra *extra;
> -               u32 len;
> +               struct vring_desc_extra *in_extra;
> +               struct vring_packed_desc *desc;
> +               u32 num;
> +
> +               in_extra = state->indir_desc;
>
>                 if (vq->use_dma_api) {
> -                       extra = &vq->packed.desc_extra[id];
>                         dma_unmap_single(vring_dma_dev(vq),
> -                                        extra->addr, extra->len,
> +                                        in_extra->addr, in_extra->len,
>                                          (flags & VRING_DESC_F_WRITE) ?
>                                          DMA_FROM_DEVICE : DMA_TO_DEVICE);

Can't we just reuse vring_unmap_extra_packed() here?

Thanks


>                 }
>
> -               /* Free the indirect table, if any, now that it's unmapped. */
> -               desc = state->indir_desc;
> -
>                 if (vring_need_unmap_buffer(vq)) {
> -                       len = vq->packed.desc_extra[id].len;
> -                       for (i = 0; i < len / sizeof(struct vring_packed_desc);
> -                                       i++)
> +                       num = in_extra->len / sizeof(struct vring_packed_desc);
> +                       desc = (struct vring_packed_desc *)(in_extra + 1);
> +
> +                       for (i = 0; i < num; i++)
>                                 vring_unmap_desc_packed(vq, &desc[i]);
>                 }
> -               kfree(desc);
> +               kfree(in_extra);
>                 state->indir_desc = NULL;
>         }
>  }
> --
> 2.32.0.3.g01195cf9f
>


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ