Message-ID: <e129c6bdfceadeaa4f4fe17524ea24f3c3471be7.1753694913.git.asml.silence@gmail.com>
Date: Mon, 28 Jul 2025 12:04:10 +0100
From: Pavel Begunkov <asml.silence@...il.com>
To: Jakub Kicinski <kuba@...nel.org>,
netdev@...r.kernel.org
Cc: asml.silence@...il.com,
io-uring@...r.kernel.org,
Eric Dumazet <edumazet@...gle.com>,
Willem de Bruijn <willemb@...gle.com>,
Paolo Abeni <pabeni@...hat.com>,
andrew+netdev@...n.ch,
horms@...nel.org,
davem@...emloft.net,
sdf@...ichev.me,
almasrymina@...gle.com,
dw@...idwei.uk,
michael.chan@...adcom.com,
dtatulea@...dia.com,
ap420073@...il.com
Subject: [RFC v1 06/22] eth: bnxt: read the page size from the adapter struct
From: Jakub Kicinski <kuba@...nel.org>
Switch from using a constant to storing the BNXT_RX_PAGE_SIZE
inside struct bnxt. This will allow configuring the page size
at runtime in subsequent patches.
The MSS size calculation for older chips continues to use the constant.
I intend to support the configuration only on more recent HW; it looks
like setting this per queue won't work on older chips, and per-queue
configuration is the ultimate goal.
This patch should not change the current behavior, as the value
read from the struct will always be BNXT_RX_PAGE_SIZE at this stage.
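Not part of the patch, but for illustration: a minimal standalone C
sketch of the pattern being applied, i.e. replacing a compile-time
constant with a field carried in the per-adapter struct and seeded
with the old constant so behavior is unchanged until later patches
make it configurable. The names here (struct adapter, rx_buf_len,
RX_PAGE_SIZE_DEFAULT) are made up for the example and are not the
driver's identifiers.

#include <assert.h>
#include <stdio.h>

#define RX_PAGE_SIZE_DEFAULT 4096u      /* stands in for BNXT_RX_PAGE_SIZE */

struct adapter {                        /* stands in for struct bnxt */
	unsigned int rx_page_size;      /* mirrors the new bp->rx_page_size */
};

/* Fast-path helpers read the size from the struct, not the constant. */
static unsigned int rx_buf_len(const struct adapter *bp)
{
	return bp->rx_page_size;
}

int main(void)
{
	/* Probe/init path: seed the field with the old constant. */
	struct adapter bp = { .rx_page_size = RX_PAGE_SIZE_DEFAULT };

	/* At this stage the value read back always equals the constant,
	 * so behavior is unchanged; later changes can override it.
	 */
	assert(rx_buf_len(&bp) == RX_PAGE_SIZE_DEFAULT);
	printf("rx page size: %u\n", rx_buf_len(&bp));
	return 0;
}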
Signed-off-by: Jakub Kicinski <kuba@...nel.org>
Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
---
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 27 ++++++++++---------
drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 +
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 4 +--
3 files changed, 17 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2cb3185c442c..274ebd63bdd9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -895,7 +895,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
{
- return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
+ return rxr->need_head_pool || PAGE_SIZE > rxr->bnapi->bp->rx_page_size;
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -905,9 +905,9 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
{
struct page *page;
- if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ if (PAGE_SIZE > bp->rx_page_size) {
page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
- BNXT_RX_PAGE_SIZE);
+ bp->rx_page_size);
} else {
page = page_pool_dev_alloc_pages(rxr->page_pool);
*offset = 0;
@@ -1139,9 +1139,9 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
- dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_page_size,
bp->rx_dir);
- skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
+ skb = napi_build_skb(data_ptr - bp->rx_offset, bp->rx_page_size);
if (!skb) {
page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
@@ -1173,7 +1173,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
- dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_page_size,
bp->rx_dir);
if (unlikely(!payload))
@@ -1187,7 +1187,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
skb_mark_for_recycle(skb);
off = (void *)data_ptr - page_address(page);
- skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
+ skb_add_rx_frag(skb, 0, page, off, len, bp->rx_page_size);
memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
payload + NET_IP_ALIGN);
@@ -1272,7 +1272,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
if (skb) {
skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
cons_rx_buf->offset,
- frag_len, BNXT_RX_PAGE_SIZE);
+ frag_len, bp->rx_page_size);
} else {
skb_frag_t *frag = &shinfo->frags[i];
@@ -1297,7 +1297,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
if (skb) {
skb->len -= frag_len;
skb->data_len -= frag_len;
- skb->truesize -= BNXT_RX_PAGE_SIZE;
+ skb->truesize -= bp->rx_page_size;
}
--shinfo->nr_frags;
@@ -1312,7 +1312,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
}
page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
- BNXT_RX_PAGE_SIZE);
+ bp->rx_page_size);
total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
@@ -4448,7 +4448,7 @@ static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
ring = &rxr->rx_agg_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
- type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ type = ((u32)bp->rx_page_size << RX_BD_LEN_SHIFT) |
RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
bnxt_init_rxbd_pages(ring, type);
@@ -4710,7 +4710,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->rx_agg_nr_pages = 0;
if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
- agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
+ agg_factor = min_t(u32, 4, 65536 / bp->rx_page_size);
bp->flags &= ~BNXT_FLAG_JUMBO;
if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
@@ -7022,7 +7022,7 @@ static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
if (ring_type == HWRM_RING_ALLOC_AGG) {
req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
- req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+ req->rx_buf_size = cpu_to_le16(bp->rx_page_size);
enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
} else {
req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
@@ -16573,6 +16573,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp = netdev_priv(dev);
bp->board_idx = ent->driver_data;
bp->msg_enable = BNXT_DEF_MSG_ENABLE;
+ bp->rx_page_size = BNXT_RX_PAGE_SIZE;
bnxt_set_max_func_irqs(bp, max_irqs);
if (bnxt_vf_pciid(bp->board_idx))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index fda0d3cc6227..ac841d02d7ad 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2358,6 +2358,7 @@ struct bnxt {
u16 max_tpa;
u32 rx_buf_size;
u32 rx_buf_use_size; /* useable size */
+ u16 rx_page_size;
u16 rx_offset;
u16 rx_dma_offset;
enum dma_data_direction rx_dir;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 4a6d8cb9f970..32bcc3aedee6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -183,7 +183,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 cons, u8 *data_ptr, unsigned int len,
struct xdp_buff *xdp)
{
- u32 buflen = BNXT_RX_PAGE_SIZE;
+ u32 buflen = bp->rx_page_size;
struct bnxt_sw_rx_bd *rx_buf;
struct pci_dev *pdev;
dma_addr_t mapping;
@@ -470,7 +470,7 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
xdp_update_skb_shared_info(skb, num_frags,
sinfo->xdp_frags_size,
- BNXT_RX_PAGE_SIZE * num_frags,
+ bp->rx_page_size * num_frags,
xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
--
2.49.0