Message-ID: <20250714160451.124671-3-jeroendb@google.com>
Date: Mon, 14 Jul 2025 09:04:48 -0700
From: Jeroen de Borst <jeroendb@...gle.com>
To: netdev@...r.kernel.org
Cc: hramamurthy@...gle.com, davem@...emloft.net, edumazet@...gle.com,
kuba@...nel.org, willemb@...gle.com, pabeni@...hat.com,
Joshua Washington <joshwash@...gle.com>, Jeroen de Borst <jeroendb@...gle.com>
Subject: [PATCH net-next 2/5] gve: merge xdp and xsk registration
From: Joshua Washington <joshwash@...gle.com>

Having both xdp_rxq and xsk_rxq is redundant: xdp_rxq can be used in
both the zero-copy case and the copy-mode case. XSK pool memory model
registration is prioritized over normal memory model registration to
ensure that memory model registration happens only once per queue.

Reviewed-by: Willem de Bruijn <willemb@...gle.com>
Signed-off-by: Joshua Washington <joshwash@...gle.com>
Signed-off-by: Jeroen de Borst <jeroendb@...gle.com>
---
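Reviewer note (not part of the patch): below is a minimal sketch of the
per-queue registration flow in gve_reg_xdp_info() as it looks with this
patch applied. Error unwinding via the err label is elided; all
identifiers are taken from the driver as patched.

	struct gve_rx_ring *rx;
	struct napi_struct *napi;
	int err, i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		struct xsk_buff_pool *xsk_pool;

		rx = &priv->rx[i];
		napi = &priv->ntfy_blocks[rx->ntfy_id].napi;

		/* Register the single xdp_rxq for this queue. */
		err = xdp_rxq_info_reg(&rx->xdp_rxq, dev, i,
				       napi->napi_id);
		if (err)
			goto err;

		/* Exactly one memory model per queue: an XSK pool bound
		 * to the queue takes priority over the normal model.
		 */
		xsk_pool = xsk_get_pool_from_qid(dev, i);
		if (xsk_pool)
			err = gve_reg_xsk_pool(priv, dev, xsk_pool, i);
		else if (gve_is_qpl(priv))
			err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
		else
			err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
							 MEM_TYPE_PAGE_POOL,
							 rx->dqo.page_pool);
		if (err)
			goto err;
	}
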
 drivers/net/ethernet/google/gve/gve.h      |  1 -
 drivers/net/ethernet/google/gve/gve_main.c | 25 +++++++++-------------
 2 files changed, 10 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 53899096e89e..b2be3fca4125 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -331,7 +331,6 @@ struct gve_rx_ring {

 	/* XDP stuff */
 	struct xdp_rxq_info xdp_rxq;
-	struct xdp_rxq_info xsk_rxq;
 	struct xsk_buff_pool *xsk_pool;
 	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
 };
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 5aca3145e6ab..d2797f55ae7c 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1167,8 +1167,8 @@ static void gve_unreg_xsk_pool(struct gve_priv *priv, u16 qid)
 	rx = &priv->rx[qid];
 	rx->xsk_pool = NULL;

-	if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
-		xdp_rxq_info_unreg(&rx->xsk_rxq);
+	if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+		xdp_rxq_info_unreg_mem_model(&rx->xdp_rxq);

 	if (!priv->tx)
 		return;
@@ -1185,11 +1185,7 @@ static int gve_reg_xsk_pool(struct gve_priv *priv, struct net_device *dev,
 	rx = &priv->rx[qid];
 	napi = &priv->ntfy_blocks[rx->ntfy_id].napi;

-	err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
-	if (err)
-		return err;
-
-	err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
+	err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
 					 MEM_TYPE_XSK_BUFF_POOL, pool);
 	if (err) {
 		gve_unreg_xsk_pool(priv, qid);
@@ -1232,6 +1228,8 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
 		return 0;

 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+		struct xsk_buff_pool *xsk_pool;
+
 		rx = &priv->rx[i];
 		napi = &priv->ntfy_blocks[rx->ntfy_id].napi;

@@ -1239,7 +1237,11 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
 				       napi->napi_id);
 		if (err)
 			goto err;
-		if (gve_is_qpl(priv))
+
+		xsk_pool = xsk_get_pool_from_qid(dev, i);
+		if (xsk_pool)
+			err = gve_reg_xsk_pool(priv, dev, xsk_pool, i);
+		else if (gve_is_qpl(priv))
 			err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
 							 MEM_TYPE_PAGE_SHARED,
 							 NULL);
@@ -1249,13 +1251,6 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
 							 rx->dqo.page_pool);
 		if (err)
 			goto err;
-		rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
-		if (!rx->xsk_pool)
-			continue;
-
-		err = gve_reg_xsk_pool(priv, dev, rx->xsk_pool, i);
-		if (err)
-			goto err;
 	}

 	return 0;
--
2.50.0.727.gbf7dc18ff4-goog