Message-ID: <CAC_iWjJdRqdOvxLB2gwkNHWLGL4e4dkcJ5A1=K2SeJ-9cjS8SQ@mail.gmail.com>
Date: Wed, 26 Jul 2023 09:56:09 +0300
From: Ilias Apalodimas <ilias.apalodimas@...aro.org>
To: Jesper Dangaard Brouer <jbrouer@...hat.com>
Cc: Jakub Kicinski <kuba@...nel.org>, netdev@...r.kernel.org, brouer@...hat.com,
almasrymina@...gle.com, hawk@...nel.org, edumazet@...gle.com,
dsahern@...il.com, michael.chan@...adcom.com, willemb@...gle.com
Subject: Re: [RFC 08/12] eth: bnxt: let the page pool manage the DMA mapping
[...]
> > - *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
> > - DMA_ATTR_WEAK_ORDERING);
> > - if (dma_mapping_error(dev, *mapping)) {
> > - page_pool_recycle_direct(rxr->page_pool, page);
> > - return NULL;
> > - }
> > + *mapping = page_pool_get_dma_addr(page);
> > + dma_sync_single_for_device(dev, *mapping, PAGE_SIZE, DMA_BIDIRECTIONAL);
> > +
>
> You can keep this as-is, but I just wanted to mention that page_pool
> supports doing the "dma_sync_for_device" via PP_FLAG_DMA_SYNC_DEV,
> thus removing more lines from driver code.
+1 to that. Also, the direction is stored in pp->dma_dir, so it
should automatically do the right thing.
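For reference, a rough sketch of what the pool init in
bnxt_alloc_rx_page_pool() could look like with the sync flag set.
The max_len/offset values below are illustrative guesses, not taken
from this patch, and pp.dev/pp.dma_dir are shown because page_pool
requires them once the DMA flags are set:

	/* Sketch only: let page_pool both map and sync for device.
	 * With PP_FLAG_DMA_SYNC_DEV, the pool syncs each page for the
	 * device before handing it out, so the explicit
	 * dma_sync_single_for_device() after page_pool_get_dma_addr()
	 * above becomes unnecessary.
	 */
	struct page_pool_params pp = { 0 };

	pp.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp.pool_size = bp->rx_ring_size;
	pp.nid       = dev_to_node(&bp->pdev->dev);
	pp.napi      = &rxr->bnapi->napi;
	pp.dev       = &bp->pdev->dev;   /* required by PP_FLAG_DMA_MAP */
	pp.dma_dir   = bp->rx_dir;       /* pool syncs with pp->dma_dir */
	pp.max_len   = PAGE_SIZE;        /* sync the whole page... */
	pp.offset    = 0;                /* ...from offset 0 (illustrative) */

Note this only covers the for-device half; the dma_sync_single_for_cpu()
calls on the RX path would stay in the driver.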
Regards
/Ilias
>
> > return page;
> > }
> >
> > @@ -951,6 +948,7 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
> > unsigned int offset_and_len)
> > {
> > unsigned int len = offset_and_len & 0xffff;
> > + struct device *dev = &bp->pdev->dev;
> > struct page *page = data;
> > u16 prod = rxr->rx_prod;
> > struct sk_buff *skb;
> > @@ -962,8 +960,7 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
> > return NULL;
> > }
> > dma_addr -= bp->rx_dma_offset;
> > - dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
> > - DMA_ATTR_WEAK_ORDERING);
> > + dma_sync_single_for_cpu(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
> > skb = build_skb(page_address(page), PAGE_SIZE);
> > if (!skb) {
> > page_pool_recycle_direct(rxr->page_pool, page);
> > @@ -984,6 +981,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
> > {
> > unsigned int payload = offset_and_len >> 16;
> > unsigned int len = offset_and_len & 0xffff;
> > + struct device *dev = &bp->pdev->dev;
> > skb_frag_t *frag;
> > struct page *page = data;
> > u16 prod = rxr->rx_prod;
> > @@ -996,8 +994,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
> > return NULL;
> > }
> > dma_addr -= bp->rx_dma_offset;
> > - dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
> > - DMA_ATTR_WEAK_ORDERING);
> > + dma_sync_single_for_cpu(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
> >
> > if (unlikely(!payload))
> > payload = eth_get_headlen(bp->dev, data_ptr, len);
> > @@ -2943,9 +2940,6 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
> > rx_buf->data = NULL;
> > if (BNXT_RX_PAGE_MODE(bp)) {
> > mapping -= bp->rx_dma_offset;
> > - dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
> > - bp->rx_dir,
> > - DMA_ATTR_WEAK_ORDERING);
> > page_pool_recycle_direct(rxr->page_pool, data);
> > } else {
> > dma_unmap_single_attrs(&pdev->dev, mapping,
> > @@ -2967,9 +2961,6 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
> > continue;
> >
> > if (BNXT_RX_PAGE_MODE(bp)) {
> > - dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
> > - BNXT_RX_PAGE_SIZE, bp->rx_dir,
> > - DMA_ATTR_WEAK_ORDERING);
> > rx_agg_buf->page = NULL;
> > __clear_bit(i, rxr->rx_agg_bmap);
> >
> > @@ -3208,6 +3199,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
> > {
> > struct page_pool_params pp = { 0 };
> >
> > + pp.flags = PP_FLAG_DMA_MAP;
> > pp.pool_size = bp->rx_ring_size;
> > pp.nid = dev_to_node(&bp->pdev->dev);
> > pp.napi = &rxr->bnapi->napi;
>