Message-ID: <20250820025704.166248-4-kuba@kernel.org>
Date: Tue, 19 Aug 2025 19:56:52 -0700
From: Jakub Kicinski <kuba@...nel.org>
To: davem@...emloft.net
Cc: netdev@...r.kernel.org,
edumazet@...gle.com,
pabeni@...hat.com,
andrew+netdev@...n.ch,
horms@...nel.org,
almasrymina@...gle.com,
michael.chan@...adcom.com,
tariqt@...dia.com,
dtatulea@...dia.com,
hawk@...nel.org,
ilias.apalodimas@...aro.org,
alexanderduyck@...com,
sdf@...ichev.me,
Jakub Kicinski <kuba@...nel.org>
Subject: [PATCH net-next 03/15] eth: fbnic: move xdp_rxq_info_reg() to resource alloc
Move rxq_info and mem model registration from fbnic_alloc_napi_vector()
and fbnic_alloc_nv_resources() to fbnic_alloc_rx_qt_resources().
The rxq_info is now registered later in the process, but that
should not cause any issues.

rxq_info lives in the fbnic_q_triad (qt) struct, so qt init is a more
natural place. Encapsulating the logic in the qt functions will also
allow simplifying the cleanup in the NAPI-related alloc functions
in the next commit, as sketched below.
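
In essence (a minimal sketch lifted from the diff below),
fbnic_free_nv_resources() no longer needs separate Tx and Rx loops;
a single loop over all qts suffices:

	for (i = 0; i < nv->txt_count + nv->rxt_count; i++)
		fbnic_free_qt_resources(fbn, &nv->qt[i]);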

Rx does not have a dedicated fbnic_free_rx_qt_resources(),
but we can use xdp_rxq_info_is_reg() to tell whether a given
rxq_info was in use (effectively, whether the qt belongs to an
Rx queue).
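
The shared free path can then guard the XDP teardown on the
registration state, along these lines (taken from the
fbnic_free_qt_resources() hunk below):

	if (xdp_rxq_info_is_reg(&qt->xdp_rxq)) {
		xdp_rxq_info_unreg_mem_model(&qt->xdp_rxq);
		xdp_rxq_info_unreg(&qt->xdp_rxq);
	}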

Having to pass nv into fbnic_alloc_rx_qt_resources() is not
great in terms of layering, but that's temporary; the page pool (pp)
will move soon.
Signed-off-by: Jakub Kicinski <kuba@...nel.org>
---
drivers/net/ethernet/meta/fbnic/fbnic_txrx.c | 58 +++++++++-----------
1 file changed, 26 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index 7f8bdb08db9f..29a780f72c14 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -1482,7 +1482,6 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn,
}
for (j = 0; j < nv->rxt_count; j++, i++) {
- xdp_rxq_info_unreg(&nv->qt[i].xdp_rxq);
fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0);
fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1);
fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
@@ -1686,11 +1685,6 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
if (err)
goto free_ring_cur_qt;
- err = xdp_rxq_info_reg(&qt->xdp_rxq, fbn->netdev, rxq_idx,
- nv->napi.napi_id);
- if (err)
- goto free_qt_pp;
-
/* Update Rx queue index */
rxt_count--;
rxq_idx += v_count;
@@ -1704,8 +1698,6 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
while (rxt_count < nv->rxt_count) {
qt--;
- xdp_rxq_info_unreg(&qt->xdp_rxq);
-free_qt_pp:
fbnic_free_qt_page_pools(qt);
free_ring_cur_qt:
fbnic_remove_rx_ring(fbn, &qt->sub0);
@@ -1938,6 +1930,11 @@ static void fbnic_free_qt_resources(struct fbnic_net *fbn,
fbnic_free_ring_resources(dev, &qt->cmpl);
fbnic_free_ring_resources(dev, &qt->sub1);
fbnic_free_ring_resources(dev, &qt->sub0);
+
+ if (xdp_rxq_info_is_reg(&qt->xdp_rxq)) {
+ xdp_rxq_info_unreg_mem_model(&qt->xdp_rxq);
+ xdp_rxq_info_unreg(&qt->xdp_rxq);
+ }
}
static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
@@ -1968,15 +1965,27 @@ static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
}
static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt)
{
struct device *dev = fbn->netdev->dev.parent;
int err;
- err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
+ err = xdp_rxq_info_reg(&qt->xdp_rxq, fbn->netdev, qt->sub0.q_idx,
+ nv->napi.napi_id);
if (err)
return err;
+ /* Register XDP memory model for completion queue */
+ err = xdp_rxq_info_reg_mem_model(&qt->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ qt->sub0.page_pool);
+ if (err)
+ goto unreg_rxq;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
+ if (err)
+ goto unreg_mm;
+
err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1);
if (err)
goto free_sub0;
@@ -1991,22 +2000,20 @@ static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn,
fbnic_free_ring_resources(dev, &qt->sub1);
free_sub0:
fbnic_free_ring_resources(dev, &qt->sub0);
+unreg_mm:
+ xdp_rxq_info_unreg_mem_model(&qt->xdp_rxq);
+unreg_rxq:
+ xdp_rxq_info_unreg(&qt->xdp_rxq);
return err;
}
static void fbnic_free_nv_resources(struct fbnic_net *fbn,
struct fbnic_napi_vector *nv)
{
- int i, j;
+ int i;
- /* Free Tx Resources */
- for (i = 0; i < nv->txt_count; i++)
+ for (i = 0; i < nv->txt_count + nv->rxt_count; i++)
fbnic_free_qt_resources(fbn, &nv->qt[i]);
-
- for (j = 0; j < nv->rxt_count; j++, i++) {
- fbnic_free_qt_resources(fbn, &nv->qt[i]);
- xdp_rxq_info_unreg_mem_model(&nv->qt[i].xdp_rxq);
- }
}
static int fbnic_alloc_nv_resources(struct fbnic_net *fbn,
@@ -2023,26 +2030,13 @@ static int fbnic_alloc_nv_resources(struct fbnic_net *fbn,
/* Allocate Rx Resources */
for (j = 0; j < nv->rxt_count; j++, i++) {
- /* Register XDP memory model for completion queue */
- err = xdp_reg_mem_model(&nv->qt[i].xdp_rxq.mem,
- MEM_TYPE_PAGE_POOL,
- nv->qt[i].sub0.page_pool);
+ err = fbnic_alloc_rx_qt_resources(fbn, nv, &nv->qt[i]);
if (err)
- goto xdp_unreg_mem_model;
-
- err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]);
- if (err)
- goto xdp_unreg_cur_model;
+ goto free_qt_resources;
}
return 0;
-xdp_unreg_mem_model:
- while (j-- && i--) {
- fbnic_free_qt_resources(fbn, &nv->qt[i]);
-xdp_unreg_cur_model:
- xdp_rxq_info_unreg_mem_model(&nv->qt[i].xdp_rxq);
- }
free_qt_resources:
while (i--)
fbnic_free_qt_resources(fbn, &nv->qt[i]);
--
2.50.1