Message-ID: <CACGkMEsrr-3ArBgCksq=c60+5fZ-Xc-i653ix_vdr2f7c7wYfg@mail.gmail.com>
Date:   Fri, 26 May 2023 15:06:29 +0800
From:   Jason Wang <jasowang@...hat.com>
To:     Liang Chen <liangchen.linux@...il.com>
Cc:     mst@...hat.com, virtualization@...ts.linux-foundation.org,
        netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
        xuanzhuo@...ux.alibaba.com, kuba@...nel.org, edumazet@...gle.com,
        davem@...emloft.net, pabeni@...hat.com, alexander.duyck@...il.com
Subject: Re: [PATCH net-next 5/5] virtio_net: Implement DMA pre-handler

On Fri, May 26, 2023 at 1:47 PM Liang Chen <liangchen.linux@...il.com> wrote:
>
> Add a DMA pre-handler that utilizes the page pool for managing DMA mappings.
> When an IOMMU is enabled, turning on the page_pool_dma_map module parameter to
> select the page pool for DMA mapping management gives a significant reduction
> in the overhead caused by DMA mappings.
>
> In a test environment with a single-core VM and a QEMU-emulated IOMMU,
> significant performance improvements can be observed:
>   Upstream codebase: 1.76 Gbits/sec
>   Upstream codebase with page pool fragmentation support: 1.81 Gbits/sec
>   Upstream codebase with page pool fragmentation and DMA support: 19.3
>   Gbits/sec
>
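As a usage note on the knobs above (this is not part of the patch text): all
three are 0400 module parameters, so with this series applied they would
presumably be enabled at load time, e.g.

  modprobe virtio_net page_pool_enabled=1 page_pool_frag=1 page_pool_dma_map=1

or via virtio_net.page_pool_dma_map=1 on the kernel command line when the
driver is built in.
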
> Signed-off-by: Liang Chen <liangchen.linux@...il.com>
> ---
>  drivers/net/virtio_net.c | 55 ++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 55 insertions(+)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index ac40b8c66c59..73cc4f9fe4fa 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -22,6 +22,7 @@
>  #include <net/route.h>
>  #include <net/xdp.h>
>  #include <net/net_failover.h>
> +#include <linux/iommu.h>
>
>  static int napi_weight = NAPI_POLL_WEIGHT;
>  module_param(napi_weight, int, 0444);
> @@ -33,8 +34,10 @@ module_param(napi_tx, bool, 0644);
>
>  static bool page_pool_enabled;
>  static bool page_pool_frag;
> +static bool page_pool_dma_map;
>  module_param(page_pool_enabled, bool, 0400);
>  module_param(page_pool_frag, bool, 0400);
> +module_param(page_pool_dma_map, bool, 0400);
>
>  /* FIXME: MTU in config. */
>  #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
> @@ -3830,6 +3833,49 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
>         virtnet_free_queues(vi);
>  }
>
> +static dma_addr_t virtnet_pp_dma_map_page(struct device *dev, struct page *page,
> +                                         unsigned long offset, size_t size,
> +                                         enum dma_data_direction dir, unsigned long attrs)
> +{
> +       struct page *head_page;
> +
> +       if (dir != DMA_FROM_DEVICE)
> +               return 0;
> +
> +       head_page = compound_head(page);
> +       return page_pool_get_dma_addr(head_page)
> +               + (page - head_page) * PAGE_SIZE
> +               + offset;

So it's not a map, it is just a query of the DMA address from the pool.

> +}
> +
> +static bool virtnet_pp_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
> +                                     size_t size, enum dma_data_direction dir,
> +                                     unsigned long attrs)
> +{
> +       phys_addr_t phys;
> +
> +       /* Handle only the RX direction, and sync the DMA memory only if it's not
> +        * a DMA coherent architecture.
> +        */
> +       if (dir != DMA_FROM_DEVICE)
> +               return false;
> +
> +       if (dev_is_dma_coherent(dev))
> +               return true;
> +
> +       phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);

This would be somewhat slow. If we track the mapping in the driver, it
would be much faster.
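
For illustration only, a rough sketch of what "track the mapping in the
driver" could look like: remember dma_addr -> phys when the pre-handler hands
out a mapping, so the unmap/sync path can skip the iommu_iova_to_phys() walk.
Everything below (virtnet_dma_track(), virtnet_dma_untrack(), the global
table) is hypothetical and not part of this series; a real version would be
per receive queue and would need proper locking context and error handling.

#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

/* One entry per outstanding RX mapping: dma_addr -> phys. */
struct virtnet_dma_ent {
	struct hlist_node node;
	dma_addr_t dma;
	phys_addr_t phys;
};

static DEFINE_HASHTABLE(virtnet_dma_table, 8);
static DEFINE_SPINLOCK(virtnet_dma_lock);

/* Record the phys address at "map" time, where the page is still at hand. */
static int virtnet_dma_track(dma_addr_t dma, phys_addr_t phys)
{
	struct virtnet_dma_ent *ent = kmalloc(sizeof(*ent), GFP_ATOMIC);

	if (!ent)
		return -ENOMEM;
	ent->dma = dma;
	ent->phys = phys;
	spin_lock(&virtnet_dma_lock);
	hash_add(virtnet_dma_table, &ent->node, dma);
	spin_unlock(&virtnet_dma_lock);
	return 0;
}

/* Look up and drop the entry at "unmap" time; returns 0 if unknown. */
static phys_addr_t virtnet_dma_untrack(dma_addr_t dma)
{
	struct virtnet_dma_ent *ent;
	phys_addr_t phys = 0;

	spin_lock(&virtnet_dma_lock);
	hash_for_each_possible(virtnet_dma_table, ent, node, dma) {
		if (ent->dma == dma) {
			phys = ent->phys;
			hash_del(&ent->node);
			kfree(ent);
			break;
		}
	}
	spin_unlock(&virtnet_dma_lock);
	return phys;
}

With something along these lines, virtnet_pp_dma_unmap_page() could call
virtnet_dma_untrack(dma_handle) and pass the result straight to
arch_sync_dma_for_cpu() instead of walking the IOMMU page tables.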

More could be seen here:

https://lists.linuxfoundation.org/pipermail/virtualization/2023-May/066778.html

Thanks

> +       if (WARN_ON(!phys))
> +               return false;
> +
> +       arch_sync_dma_for_cpu(phys, size, dir);
> +       return true;
> +}
> +
> +static struct virtqueue_pre_dma_ops virtnet_pp_pre_dma_ops = {
> +       .map_page = virtnet_pp_dma_map_page,
> +       .unmap_page = virtnet_pp_dma_unmap_page,
> +};
> +
>  static void virtnet_alloc_page_pool(struct receive_queue *rq)
>  {
>         struct virtio_device *vdev = rq->vq->vdev;
> @@ -3845,6 +3891,15 @@ static void virtnet_alloc_page_pool(struct receive_queue *rq)
>         if (page_pool_frag)
>                 pp_params.flags |= PP_FLAG_PAGE_FRAG;
>
> +       /* Consider using page pool DMA support only when DMA API is used. */
> +       if (virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM) &&
> +           page_pool_dma_map) {
> +               pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
> +               pp_params.dma_dir = DMA_FROM_DEVICE;
> +               pp_params.max_len = PAGE_SIZE << pp_params.order;
> +               virtqueue_register_pre_dma_ops(rq->vq, &virtnet_pp_pre_dma_ops);
> +       }
> +
>         rq->page_pool = page_pool_create(&pp_params);
>         if (IS_ERR(rq->page_pool)) {
>                 dev_warn(&vdev->dev, "page pool creation failed: %ld\n",
> --
> 2.31.1
>
