Message-ID: <aFHPw5pjyA9yIepf@mini-arch>
Date: Tue, 17 Jun 2025 13:27:47 -0700
From: Stanislav Fomichev <stfomichev@...il.com>
To: Taehee Yoo <ap420073@...il.com>
Cc: davem@...emloft.net, kuba@...nel.org, pabeni@...hat.com,
edumazet@...gle.com, andrew+netdev@...n.ch, horms@...nel.org,
michael.chan@...adcom.com, pavan.chebbi@...adcom.com,
almasrymina@...gle.com, sdf@...ichev.me, netdev@...r.kernel.org
Subject: Re: [PATCH net-next] eth: bnxt: add netmem TX support

On 06/17, Taehee Yoo wrote:
> Use the netmem_dma_*() helpers and set netmem_tx to support netmem TX.
> With this change, all bnxt devices will support netmem TX.
>
> bnxt_start_xmit() uses memcpy() if a packet is small enough for the
> push path. However, netmem packets are unreadable, so memcpy() is not
> allowed on them. Check whether the skb is readable, and if it is not,
> handle it with the normal transmission logic instead.
>
> netmem TX can be tested with the ncdevmem.c selftest.
>
> Signed-off-by: Taehee Yoo <ap420073@...il.com>
> ---
> drivers/net/ethernet/broadcom/bnxt/bnxt.c | 28 ++++++++++++++---------
> 1 file changed, 17 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> index 869580b6f70d..4de9dc123a18 100644
> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> @@ -477,6 +477,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> struct bnxt_tx_ring_info *txr;
> struct bnxt_sw_tx_bd *tx_buf;
> __le32 lflags = 0;
> + skb_frag_t *frag;
>
> i = skb_get_queue_mapping(skb);
> if (unlikely(i >= bp->tx_nr_rings)) {
> @@ -563,7 +564,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
>
> if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
> - !lflags) {
> + skb_frags_readable(skb) && !lflags) {
> struct tx_push_buffer *tx_push_buf = txr->tx_push;
> struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
> struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
> @@ -598,9 +599,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> skb_copy_from_linear_data(skb, pdata, len);
> pdata += len;
> for (j = 0; j < last_frag; j++) {
> - skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
> void *fptr;
>
> + frag = &skb_shinfo(skb)->frags[j];
> fptr = skb_frag_address_safe(frag);
> if (!fptr)
> goto normal_tx;
> @@ -708,8 +709,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
> txbd0 = txbd;
> for (i = 0; i < last_frag; i++) {
> - skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
> -
> + frag = &skb_shinfo(skb)->frags[i];
> prod = NEXT_TX(prod);
> txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
>
> @@ -721,7 +721,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> goto tx_dma_error;
>
> tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
> - dma_unmap_addr_set(tx_buf, mapping, mapping);
> + netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
> + mapping, mapping);
>
> txbd->tx_bd_haddr = cpu_to_le64(mapping);
>
> @@ -778,9 +779,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> for (i = 0; i < last_frag; i++) {
> prod = NEXT_TX(prod);
> tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
> - dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
> - skb_frag_size(&skb_shinfo(skb)->frags[i]),
> - DMA_TO_DEVICE);
> + frag = &skb_shinfo(skb)->frags[i];
> + netmem_dma_unmap_page_attrs(&pdev->dev,
> + dma_unmap_addr(tx_buf, mapping),
> + skb_frag_size(frag),
> + DMA_TO_DEVICE, 0);
> }
>
> tx_free:
> @@ -3422,9 +3425,11 @@ static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
> skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
>
> tx_buf = &txr->tx_buf_ring[ring_idx];
> - dma_unmap_page(&pdev->dev,
> - dma_unmap_addr(tx_buf, mapping),
> - skb_frag_size(frag), DMA_TO_DEVICE);
> + netmem_dma_unmap_page_attrs(&pdev->dev,
> + dma_unmap_addr(tx_buf,
> + mapping),
> + skb_frag_size(frag),
> + DMA_TO_DEVICE, 0);
> }
> dev_kfree_skb(skb);
> }
> @@ -16713,6 +16718,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
> if (BNXT_SUPPORTS_QUEUE_API(bp))
> dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
> dev->request_ops_lock = true;
> + dev->netmem_tx = true;
>
> rc = register_netdev(dev);
> if (rc)
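
For anyone reading along: the netmem_dma_*() helpers used above are,
roughly, thin wrappers that record a DMA address of 0 for unreadable
(net_iov) frags at map time and skip the unmap when the recorded address
is 0, so the driver never unmaps memory it does not own. A from-memory
sketch of the idea, not a verbatim copy of include/net/netmem.h:

	/* Sketch: record the mapping only for readable frags; net_iov
	 * frags get 0 so the unmap path below can skip them.
	 */
	#define netmem_dma_unmap_addr_set(NETMEM, PTR, ADDR_NAME, VAL)	\
		do {							\
			if (!netmem_is_net_iov(NETMEM))			\
				dma_unmap_addr_set(PTR, ADDR_NAME, VAL); \
			else						\
				dma_unmap_addr_set(PTR, ADDR_NAME, 0);	\
		} while (0)

	/* Sketch: a zero address means the frag was unreadable netmem,
	 * whose mapping is owned by the dmabuf binding, so do nothing.
	 */
	static inline void
	netmem_dma_unmap_page_attrs(struct device *dev, dma_addr_t dma,
				    size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
	{
		if (dma)
			dma_unmap_page_attrs(dev, dma, size, dir, attrs);
	}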

Acked-by: Stanislav Fomichev <sdf@...ichev.me>

This is similar to what I had internally for testing. One thing to
think about might be to put that netmem_tx = true under the
BNXT_SUPPORTS_QUEUE_API conditional, so that netmem rx and tx are
either both supported or both unsupported. But since there is probably
no real FW requirement for TX, it should be good as is; a sketch of the
alternative follows.
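
A minimal sketch of that alternative in bnxt_init_one(), assuming the
context of the last hunk above (untested):

	/* Hypothetical: advertise netmem TX only when the queue mgmt
	 * API (and hence netmem RX) is supported, so rx/tx netmem
	 * support always comes and goes together for a given device.
	 */
	if (BNXT_SUPPORTS_QUEUE_API(bp)) {
		dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
		dev->netmem_tx = true;
	}
	dev->request_ops_lock = true;

The trade-off is that devices without the queue API would then lose
netmem TX even though TX likely has no FW requirement, which is why the
patch is fine as is.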