Message-ID: <20250829012304.4146195-12-kuba@kernel.org>
Date: Thu, 28 Aug 2025 18:23:01 -0700
From: Jakub Kicinski <kuba@...nel.org>
To: davem@...emloft.net
Cc: netdev@...r.kernel.org,
edumazet@...gle.com,
pabeni@...hat.com,
andrew+netdev@...n.ch,
horms@...nel.org,
almasrymina@...gle.com,
tariqt@...dia.com,
dtatulea@...dia.com,
hawk@...nel.org,
ilias.apalodimas@...aro.org,
alexanderduyck@...com,
sdf@...ichev.me,
Jakub Kicinski <kuba@...nel.org>
Subject: [PATCH net-next v2 11/14] eth: fbnic: allocate unreadable page pool for the payloads

Allow allocating a page pool with unreadable memory for the payload
ring (sub1). We need to provide the queue ID so that the memory
provider can match the PP. Use the appropriate page pool DMA sync
helper; for unreadable mem the direction has to be FROM_DEVICE. The
default is BIDIR for XDP, but obviously unreadable mem is not
compatible with XDP in the first place, so that's fine. While at it
remove the define for the page pool flags. (The resulting setup is
sketched below.)
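
For illustration, a minimal sketch of the resulting setup pattern,
not the fbnic code itself (the helper name and pool_size are made up;
the page_pool_params fields and netif_rxq_has_unreadable_mp() are the
APIs this series relies on):

#include <net/page_pool/helpers.h>	/* page_pool_create(), PP_FLAG_* */

/* Hypothetical helper: create a payload page pool which accepts
 * unreadable netmem when a memory provider is bound to the queue.
 */
static struct page_pool *
payload_pp_create(struct net_device *netdev, struct device *dev,
		  struct napi_struct *napi, unsigned int rxq_idx)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= 256,			/* arbitrary for the sketch */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_BIDIRECTIONAL,	/* default, XDP-capable */
		.max_len	= PAGE_SIZE,
		.napi		= napi,
		.netdev		= netdev,
		.queue_idx	= rxq_idx,	/* lets the MP match the PP */
	};

	if (netif_rxq_has_unreadable_mp(netdev, rxq_idx)) {
		/* No XDP on unreadable mem, so RX-only direction is fine */
		pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
		pp_params.dma_dir = DMA_FROM_DEVICE;
	}

	return page_pool_create(&pp_params);
}
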
The rxq_idx is passed to fbnic_alloc_qt_page_pools() explicitly
to make it easy to allocate page pools without NAPI (see the patch
after the next).
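
The sync helper swap mentioned above, sketched under the same
assumptions (the wrapper name is made up): the netmem-aware helper
takes the page pool instead of a raw device + direction, so it syncs
according to the pool's own DMA setup and degrades to a no-op for
pools whose memory provider handles syncing itself.

/* Hypothetical RX-path snippet: sync a received payload fragment for
 * CPU access through the pool that allocated it.
 */
static void payload_frag_sync_for_cpu(struct page_pool *pool,
				      netmem_ref netmem,
				      u32 pg_off, u32 truesize)
{
	page_pool_dma_sync_netmem_for_cpu(pool, netmem, pg_off, truesize);
}
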
Reviewed-by: Mina Almasry <almasrymina@...gle.com>
Signed-off-by: Jakub Kicinski <kuba@...nel.org>
---
v2:
- update commit msg
v1: https://lore.kernel.org/20250820025704.166248-13-kuba@kernel.org
---
drivers/net/ethernet/meta/fbnic/fbnic_txrx.c | 31 +++++++++++++-------
1 file changed, 21 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index 7694b25ef77d..2727cc037663 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -997,9 +997,8 @@ static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
 		       FBNIC_BD_FRAG_SIZE;
 
 	/* Sync DMA buffer */
-	dma_sync_single_range_for_cpu(nv->dev,
-				      page_pool_get_dma_addr_netmem(netmem),
-				      pg_off, truesize, DMA_BIDIRECTIONAL);
+	page_pool_dma_sync_netmem_for_cpu(qt->sub1.page_pool, netmem,
+					  pg_off, truesize);
 
 	added = xdp_buff_add_frag(&pkt->buff, netmem, pg_off, len, truesize);
 	if (unlikely(!added)) {
@@ -1515,16 +1514,14 @@ void fbnic_free_napi_vectors(struct fbnic_net *fbn)
 			fbnic_free_napi_vector(fbn, fbn->napi[i]);
 }
 
-#define FBNIC_PAGE_POOL_FLAGS \
-	(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
-
 static int
 fbnic_alloc_qt_page_pools(struct fbnic_net *fbn, struct fbnic_napi_vector *nv,
-			  struct fbnic_q_triad *qt)
+			  struct fbnic_q_triad *qt, unsigned int rxq_idx)
 {
 	struct page_pool_params pp_params = {
 		.order = 0,
-		.flags = FBNIC_PAGE_POOL_FLAGS,
+		.flags = PP_FLAG_DMA_MAP |
+			 PP_FLAG_DMA_SYNC_DEV,
 		.pool_size = fbn->hpq_size + fbn->ppq_size,
 		.nid = NUMA_NO_NODE,
 		.dev = nv->dev,
@@ -1533,6 +1530,7 @@ fbnic_alloc_qt_page_pools(struct fbnic_net *fbn, struct fbnic_napi_vector *nv,
 		.max_len = PAGE_SIZE,
 		.napi = &nv->napi,
 		.netdev = fbn->netdev,
+		.queue_idx = rxq_idx,
 	};
 	struct page_pool *pp;
 
@@ -1553,10 +1551,23 @@ fbnic_alloc_qt_page_pools(struct fbnic_net *fbn, struct fbnic_napi_vector *nv,
 		return PTR_ERR(pp);
 
 	qt->sub0.page_pool = pp;
-	page_pool_get(pp);
+	if (netif_rxq_has_unreadable_mp(fbn->netdev, rxq_idx)) {
+		pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+		pp_params.dma_dir = DMA_FROM_DEVICE;
+
+		pp = page_pool_create(&pp_params);
+		if (IS_ERR(pp))
+			goto err_destroy_sub0;
+	} else {
+		page_pool_get(pp);
+	}
 	qt->sub1.page_pool = pp;
 
 	return 0;
+
+err_destroy_sub0:
+	page_pool_destroy(qt->sub0.page_pool);
+	return PTR_ERR(pp);
 }
 
 static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
@@ -1961,7 +1972,7 @@ static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn,
 	struct device *dev = fbn->netdev->dev.parent;
 	int err;
 
-	err = fbnic_alloc_qt_page_pools(fbn, nv, qt);
+	err = fbnic_alloc_qt_page_pools(fbn, nv, qt, qt->cmpl.q_idx);
 	if (err)
 		return err;
 
--
2.51.0