Message-ID: <35c505ee-b44d-4817-ab68-c4f1f768b242@davidwei.uk>
Date: Mon, 7 Oct 2024 19:45:12 -0700
From: David Wei <dw@...idwei.uk>
To: Taehee Yoo <ap420073@...il.com>, davem@...emloft.net, kuba@...nel.org,
pabeni@...hat.com, edumazet@...gle.com, almasrymina@...gle.com,
netdev@...r.kernel.org, linux-doc@...r.kernel.org, donald.hunter@...il.com,
corbet@....net, michael.chan@...adcom.com
Cc: kory.maincent@...tlin.com, andrew@...n.ch, maxime.chevallier@...tlin.com,
danieller@...dia.com, hengqi@...ux.alibaba.com, ecree.xilinx@...il.com,
przemyslaw.kitszel@...el.com, hkallweit1@...il.com, ahmed.zaki@...el.com,
paul.greenwalt@...el.com, rrameshbabu@...dia.com, idosch@...dia.com,
asml.silence@...il.com, kaiyuanz@...gle.com, willemb@...gle.com,
aleksander.lobakin@...el.com, sridhar.samudrala@...el.com, bcreeley@....com
Subject: Re: [PATCH net-next v3 7/7] bnxt_en: add support for device memory
tcp
On 2024-10-03 09:06, Taehee Yoo wrote:
> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> index 872b15842b11..64e07d247f97 100644
> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> @@ -55,6 +55,7 @@
> #include <net/page_pool/helpers.h>
> #include <linux/align.h>
> #include <net/netdev_queues.h>
> +#include <net/netdev_rx_queue.h>
>
> #include "bnxt_hsi.h"
> #include "bnxt.h"
> @@ -863,6 +864,22 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
> bnapi->events &= ~BNXT_TX_CMP_EVENT;
> }
>
> +static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
> + struct bnxt_rx_ring_info *rxr,
> + unsigned int *offset,
> + gfp_t gfp)
gfp is unused; the call below hardcodes GFP_ATOMIC.
> +{
> + netmem_ref netmem;
> +
> + netmem = page_pool_alloc_netmem(rxr->page_pool, GFP_ATOMIC);
> + if (!netmem)
> + return 0;
> + *offset = 0;
> +
> + *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
offset is always 0, so it doesn't need to be an out parameter (see sketch below).
> + return netmem;
> +}
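If offset really is always 0, can the out param be dropped entirely and
the caller's gfp passed through? Untested sketch of what I mean (the
call sites would need the matching change):

static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
					  struct bnxt_rx_ring_info *rxr,
					  gfp_t gfp)
{
	netmem_ref netmem;

	/* honour the caller's gfp instead of hardcoding GFP_ATOMIC */
	netmem = page_pool_alloc_netmem(rxr->page_pool, gfp);
	if (!netmem)
		return 0;

	/* offset is always 0, so nothing to add to the DMA address */
	*mapping = page_pool_get_dma_addr_netmem(netmem);
	return netmem;
}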
> +
> static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
> struct bnxt_rx_ring_info *rxr,
> unsigned int *offset,
[...]
> @@ -1192,6 +1209,7 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
>
> static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
> struct bnxt_cp_ring_info *cpr,
> + struct sk_buff *skb,
> struct skb_shared_info *shinfo,
> u16 idx, u32 agg_bufs, bool tpa,
> struct xdp_buff *xdp)
> @@ -1211,7 +1229,7 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
> u16 cons, frag_len;
> struct rx_agg_cmp *agg;
> struct bnxt_sw_rx_agg_bd *cons_rx_buf;
> - struct page *page;
> + netmem_ref netmem;
> dma_addr_t mapping;
>
> if (p5_tpa)
> @@ -1223,9 +1241,15 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
> RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
>
> cons_rx_buf = &rxr->rx_agg_ring[cons];
> - skb_frag_fill_page_desc(frag, cons_rx_buf->page,
> - cons_rx_buf->offset, frag_len);
> - shinfo->nr_frags = i + 1;
> + if (skb) {
> + skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
> + cons_rx_buf->offset, frag_len,
> + BNXT_RX_PAGE_SIZE);
> + } else {
> + skb_frag_fill_page_desc(frag, netmem_to_page(cons_rx_buf->netmem),
> + cons_rx_buf->offset, frag_len);
> + shinfo->nr_frags = i + 1;
> + }
I feel like this function needs a refactor at some point to split out
the skb and xdp paths.
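Roughly the shape I have in mind (names here are just placeholders,
untested, and not a request for this series):

static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 struct sk_buff *skb,
				 u16 idx, u32 agg_bufs, bool tpa);

static u32 __bnxt_rx_agg_pages_xdp(struct bnxt *bp,
				   struct bnxt_cp_ring_info *cpr,
				   struct skb_shared_info *shinfo,
				   u16 idx, u32 agg_bufs, bool tpa,
				   struct xdp_buff *xdp);

The skb path would use skb_add_rx_frag_netmem() and the xdp path would
keep filling shinfo frags directly, so each function only carries one
of the two branches above.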
> __clear_bit(cons, rxr->rx_agg_bmap);
>
> /* It is possible for bnxt_alloc_rx_page() to allocate
[...]
> @@ -3608,9 +3629,11 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
>
> static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
> struct bnxt_rx_ring_info *rxr,
> + int queue_idx,
To save a parameter, the index is already available in rxr->bnapi->index (sketch after this hunk).
> int numa_node)
> {
> struct page_pool_params pp = { 0 };
> + struct netdev_rx_queue *rxq;
>
> pp.pool_size = bp->rx_agg_ring_size;
> if (BNXT_RX_PAGE_MODE(bp))
> @@ -3621,8 +3644,15 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
> pp.dev = &bp->pdev->dev;
> pp.dma_dir = bp->rx_dir;
> pp.max_len = PAGE_SIZE;
> - pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
> + pp.order = 0;
> +
> + rxq = __netif_get_rx_queue(bp->dev, queue_idx);
> + if (rxq->mp_params.mp_priv)
> + pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_ALLOW_UNREADABLE_NETMEM;
> + else
> + pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
>
> + pp.queue_idx = queue_idx;
> rxr->page_pool = page_pool_create(&pp);
> if (IS_ERR(rxr->page_pool)) {
> int err = PTR_ERR(rxr->page_pool);
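On the queue_idx/rxr->bnapi->index point above, something like this
(untested) is what I was thinking, keeping the original signature:

static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr,
				   int numa_node)
{
	struct page_pool_params pp = { 0 };
	struct netdev_rx_queue *rxq;
	int queue_idx = rxr->bnapi->index;
	...
	rxq = __netif_get_rx_queue(bp->dev, queue_idx);
	if (rxq->mp_params.mp_priv)
		pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_ALLOW_UNREADABLE_NETMEM;
	else
		pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;

	pp.queue_idx = queue_idx;
	...

Then the call in bnxt_alloc_rx_rings() can stay as it is today, without
the extra argument.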
> @@ -3655,7 +3685,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
> cpu_node = cpu_to_node(cpu);
> netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
> i, cpu_node);
> - rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
> + rc = bnxt_alloc_rx_page_pool(bp, rxr, i, cpu_node);
> if (rc)
> return rc;
>