Message-ID: <CACGkMEvhejnVM=x2+PxnKXcyC4W4nAbhkt4-reWb-7=fYQ6qKw@mail.gmail.com>
Date: Thu, 18 Apr 2024 14:25:06 +0800
From: Jason Wang <jasowang@...hat.com>
To: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
Cc: virtualization@...ts.linux.dev, "Michael S. Tsirkin" <mst@...hat.com>,
"David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>, netdev@...r.kernel.org
Subject: Re: [PATCH vhost 4/6] virtio_net: big mode support premapped
On Thu, Apr 11, 2024 at 10:51 AM Xuan Zhuo <xuanzhuo@...ux.alibaba.com> wrote:
>
> In big mode, pre-mapping DMA is beneficial because pages that come back
> unused can be reused without an unmap/remap cycle.
>
> We need space to store the DMA address, so we use page.dma_addr, which
> belongs to the page_pool (pp) fields inside struct page.
>
> Every page retrieved from get_a_page() is mapped, and its DMA address is
> stored in page.dma_addr. When a page is returned to the chain, we check
> its DMA state and remap it if it has been unmapped.
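
Just to confirm my understanding of the lifecycle this patch
implements (my own sketch, pieced together from the helpers the patch
introduces below, so correct me if I misread it):

	/* allocation: get_a_page() maps a fresh page exactly once */
	p = alloc_page(gfp_mask);
	if (page_chain_map(rq, p))	/* stores the address in page.dma_addr */
		__free_pages(p, 0);

	/* consumption: page_to_skb() unmaps the page before the CPU reads
	 * it, which resets page.dma_addr to DMA_MAPPING_ERROR
	 */
	page_chain_unmap(rq, p);

	/* return: give_pages() remaps a page that comes back unmapped */
	if (page_dma_addr(p) == DMA_MAPPING_ERROR)
		page_chain_map(rq, p);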
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> ---
> drivers/net/virtio_net.c | 98 +++++++++++++++++++++++++++++++++-------
> 1 file changed, 81 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 4446fb54de6d..7ea7e9bcd5d7 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -50,6 +50,7 @@ module_param(napi_tx, bool, 0644);
>
> #define page_chain_next(p) ((struct page *)((p)->pp))
> #define page_chain_add(p, n) ((p)->pp = (void *)n)
> +#define page_dma_addr(p) ((p)->dma_addr)
>
> /* RX packet size EWMA. The average packet size is used to determine the packet
> * buffer size when refilling RX rings. As the entire RX ring may be refilled
> @@ -434,6 +435,46 @@ skb_vnet_common_hdr(struct sk_buff *skb)
> return (struct virtio_net_common_hdr *)skb->cb;
> }
>
> +static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> +{
> + sg->dma_address = addr;
> + sg->length = len;
> +}
> +
> +static void page_chain_unmap(struct receive_queue *rq, struct page *p)
> +{
> + virtqueue_dma_unmap_page_attrs(rq->vq, page_dma_addr(p), PAGE_SIZE,
> + DMA_FROM_DEVICE, 0);
> +
> + page_dma_addr(p) = DMA_MAPPING_ERROR;
> +}
> +
> +static int page_chain_map(struct receive_queue *rq, struct page *p)
> +{
> + dma_addr_t addr;
> +
> + addr = virtqueue_dma_map_page_attrs(rq->vq, p, 0, PAGE_SIZE, DMA_FROM_DEVICE, 0);
> + if (virtqueue_dma_mapping_error(rq->vq, addr))
> + return -ENOMEM;
> +
> + page_dma_addr(p) = addr;
> + return 0;
> +}
> +
> +static void page_chain_release(struct receive_queue *rq)
> +{
> + struct page *p, *n;
> +
> + for (p = rq->pages; p; p = n) {
> + n = page_chain_next(p);
> +
> + page_chain_unmap(rq, p);
> + __free_pages(p, 0);
> + }
> +
> + rq->pages = NULL;
> +}
> +
> /*
> * put the whole most recent used list in the beginning for reuse
> */
> @@ -441,6 +482,13 @@ static void give_pages(struct receive_queue *rq, struct page *page)
> {
> struct page *end;
>
> + if (page_dma_addr(page) == DMA_MAPPING_ERROR) {
This looks strange; the map should be done during allocation. Under
which condition could we hit this?
> + if (page_chain_map(rq, page)) {
> + __free_pages(page, 0);
> + return;
> + }
> + }
> +
> /* Find end of list, sew whole thing into vi->rq.pages. */
> for (end = page; page_chain_next(end); end = page_chain_next(end));
>
> @@ -456,8 +504,15 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
> rq->pages = page_chain_next(p);
> /* clear chain here, it is used to chain pages */
> page_chain_add(p, NULL);
> - } else
> + } else {
> p = alloc_page(gfp_mask);
> +
> + if (!p)
> + return NULL;
> +
> + if (page_chain_map(rq, p)) {
> + __free_pages(p, 0);
> + return NULL;
> + }
> + }
> +
> return p;
> }
>
> @@ -613,8 +668,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
> return NULL;
>
> page = page_chain_next(page);
> - if (page)
> - give_pages(rq, page);
> goto ok;
> }
>
> @@ -640,6 +693,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
> skb_add_rx_frag(skb, 0, page, offset, len, truesize);
> else
> page_to_free = page;
> + page = NULL;
> goto ok;
> }
>
> @@ -657,6 +711,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
> BUG_ON(offset >= PAGE_SIZE);
> while (len) {
> unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
> +
> + /* Unmap the page before using it. */
> + if (!offset)
> + page_chain_unmap(rq, page);
> +
This sounds strange; do we need a virtqueue_sync_for_cpu() helper here?
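Something like the following is what I have in mind (an untested
sketch on top of the existing
virtqueue_dma_sync_single_range_for_cpu() helper; the
page_chain_sync_for_cpu() name is only for illustration). The idea is
to keep the page mapped and only hand ownership of the data back to
the CPU, instead of tearing the mapping down here and remapping in
give_pages():

	static void page_chain_sync_for_cpu(struct receive_queue *rq,
					    struct page *p)
	{
		/* The mapping stays valid; only sync the data for CPU access. */
		virtqueue_dma_sync_single_range_for_cpu(rq->vq,
							page_dma_addr(p),
							0, PAGE_SIZE,
							DMA_FROM_DEVICE);
	}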
> skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
> frag_size, truesize);
> len -= frag_size;
> @@ -664,15 +723,15 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
> offset = 0;
> }
>
> - if (page)
> - give_pages(rq, page);
> -
> ok:
> hdr = skb_vnet_common_hdr(skb);
> memcpy(hdr, hdr_p, hdr_len);
> if (page_to_free)
> put_page(page_to_free);
>
> + if (page)
> + give_pages(rq, page);
> +
> return skb;
> }
>
> @@ -823,7 +882,8 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
>
> rq = &vi->rq[i];
>
> - if (rq->do_dma)
> + /* Skip the unmap for big mode. */
> + if (!vi->big_packets || vi->mergeable_rx_bufs)
> virtnet_rq_unmap(rq, buf, 0);
>
> virtnet_rq_free_buf(vi, rq, buf);
> @@ -1346,8 +1406,12 @@ static struct sk_buff *receive_big(struct net_device *dev,
> struct virtnet_rq_stats *stats)
> {
> struct page *page = buf;
> - struct sk_buff *skb =
> - page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
> + struct sk_buff *skb;
> +
> + /* Unmap the first page. The following code may read this page. */
> + page_chain_unmap(rq, page);
And probably here as well.
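I.e. the page_chain_sync_for_cpu() sketch above might apply here too,
assuming the page's lifetime allows keeping the mapping around.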
Thanks