Message-ID: <20250717152839.973004-3-jeroendb@google.com>
Date: Thu, 17 Jul 2025 08:28:36 -0700
From: Jeroen de Borst <jeroendb@...gle.com>
To: netdev@...r.kernel.org
Cc: hramamurthy@...gle.com, davem@...emloft.net, edumazet@...gle.com,
kuba@...nel.org, willemb@...gle.com, pabeni@...hat.com,
Joshua Washington <joshwash@...gle.com>, Jeroen de Borst <jeroendb@...gle.com>
Subject: [PATCH net-next v2 2/5] gve: merge xdp and xsk registration
From: Joshua Washington <joshwash@...gle.com>
Maintaining both xdp_rxq and xsk_rxq is redundant: xdp_rxq can be used
in both the zero-copy mode and the copy mode case. XSK pool memory
model registration takes priority over normal memory model registration
so that the memory model is registered only once per queue.
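For illustration, a minimal sketch (not the driver code itself) of the
per-queue flow this patch arrives at; the driver state and function
name are hypothetical, while xdp_rxq_info_reg(),
xdp_rxq_info_reg_mem_model(), xsk_get_pool_from_qid() and
xdp_rxq_info_unreg() are the in-kernel APIs involved:

  #include <net/xdp.h>
  #include <net/xdp_sock_drv.h>

  /* Sketch: one xdp_rxq_info per RX queue, registered once; the
   * memory model is chosen afterwards, preferring a bound XSK pool.
   */
  static int sketch_reg_rxq(struct net_device *dev,
			    struct xdp_rxq_info *rxq, u16 qid,
			    unsigned int napi_id)
  {
	struct xsk_buff_pool *pool;
	int err;

	err = xdp_rxq_info_reg(rxq, dev, qid, napi_id);
	if (err)
		return err;

	/* Prefer the XSK pool memory model when a pool is bound to
	 * this queue, so the memory model is registered exactly once.
	 */
	pool = xsk_get_pool_from_qid(dev, qid);
	if (pool)
		err = xdp_rxq_info_reg_mem_model(rxq,
						 MEM_TYPE_XSK_BUFF_POOL,
						 pool);
	else
		err = xdp_rxq_info_reg_mem_model(rxq,
						 MEM_TYPE_PAGE_SHARED,
						 NULL);
	if (err)
		xdp_rxq_info_unreg(rxq);
	return err;
  }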
Reviewed-by: Willem de Bruijn <willemb@...gle.com>
Signed-off-by: Joshua Washington <joshwash@...gle.com>
Signed-off-by: Jeroen de Borst <jeroendb@...gle.com>
---
v2: Remove unused napi_struct pointer
---
drivers/net/ethernet/google/gve/gve.h | 1 -
drivers/net/ethernet/google/gve/gve_main.c | 27 ++++++++--------------
2 files changed, 10 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 53899096e89e..b2be3fca4125 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -331,7 +331,6 @@ struct gve_rx_ring {
/* XDP stuff */
struct xdp_rxq_info xdp_rxq;
- struct xdp_rxq_info xsk_rxq;
struct xsk_buff_pool *xsk_pool;
struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 5aca3145e6ab..cf8e1abdfa8e 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1167,8 +1167,8 @@ static void gve_unreg_xsk_pool(struct gve_priv *priv, u16 qid)
rx = &priv->rx[qid];
rx->xsk_pool = NULL;
- if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
- xdp_rxq_info_unreg(&rx->xsk_rxq);
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg_mem_model(&rx->xdp_rxq);
if (!priv->tx)
return;
@@ -1178,18 +1178,12 @@ static void gve_unreg_xsk_pool(struct gve_priv *priv, u16 qid)
static int gve_reg_xsk_pool(struct gve_priv *priv, struct net_device *dev,
struct xsk_buff_pool *pool, u16 qid)
{
- struct napi_struct *napi;
struct gve_rx_ring *rx;
u16 tx_qid;
int err;
rx = &priv->rx[qid];
- napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
- err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
- if (err)
- return err;
-
- err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, pool);
if (err) {
gve_unreg_xsk_pool(priv, qid);
@@ -1232,6 +1226,8 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
return 0;
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ struct xsk_buff_pool *xsk_pool;
+
rx = &priv->rx[i];
napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
@@ -1239,7 +1235,11 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
napi->napi_id);
if (err)
goto err;
- if (gve_is_qpl(priv))
+
+ xsk_pool = xsk_get_pool_from_qid(dev, i);
+ if (xsk_pool)
+ err = gve_reg_xsk_pool(priv, dev, xsk_pool, i);
+ else if (gve_is_qpl(priv))
err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
NULL);
@@ -1249,13 +1249,6 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
rx->dqo.page_pool);
if (err)
goto err;
- rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
- if (!rx->xsk_pool)
- continue;
-
- err = gve_reg_xsk_pool(priv, dev, rx->xsk_pool, i);
- if (err)
- goto err;
}
return 0;
--
2.50.0.727.gbf7dc18ff4-goog