Message-ID: <20250723222829.3528565-1-hramamurthy@google.com>
Date: Wed, 23 Jul 2025 22:28:29 +0000
From: Harshitha Ramamurthy <hramamurthy@...gle.com>
To: netdev@...r.kernel.org
Cc: jeroendb@...gle.com, hramamurthy@...gle.com, andrew+netdev@...n.ch,
davem@...emloft.net, edumazet@...gle.com, kuba@...nel.org, pabeni@...hat.com,
willemb@...gle.com, pkaligineedi@...gle.com, joshwash@...gle.com,
linux-kernel@...r.kernel.org, Mina Almasry <almasrymina@...gle.com>,
Ziwei Xiao <ziweixiao@...gle.com>
Subject: [PATCH net-next] gve: support unreadable netmem

From: Mina Almasry <almasrymina@...gle.com>

Declare PP_FLAG_ALLOW_UNREADABLE_NETMEM to turn on unreadable netmem
support in GVE.
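
For readers skimming the cover text, the opt-in is a small addition to
the page pool setup; this is condensed from the gve_rx_create_page_pool()
hunk below (field and variable names as in the existing driver code):

        if (priv->header_split_enabled) {
                /* Allow the pool to hand out unreadable (e.g. device
                 * memory) netmem and bind it to this RX queue.
                 */
                pp.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
                pp.queue_idx = rx->q_num;
        }
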
We also drop any net_iov packets when header split is not enabled, since
we are unable to process packets whose header landed in unreadable
netmem.
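
Condensed from the gve_rx_dqo() hunk below (where it sits on the
else branch of the header-split handling), the drop amounts to:

        /* No header split: the header is in the packet buffer. If that
         * buffer is a net_iov, the CPU cannot read the header, so drop.
         */
        if (!rx->ctx.skb_head && rx->dqo.page_pool &&
            netmem_is_net_iov(buf_state->page_info.netmem)) {
                gve_free_buffer(rx, buf_state);
                return -EFAULT;
        }
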
Use page_pool_dma_sync_netmem_for_cpu in lieu of
dma_sync_single_range_for_cpu to correctly handle unreadable netmem
that should not be dma-sync'd.
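
The new gve_dma_sync() helper (see the gve_rx_dqo.c hunk below) picks
the sync primitive based on whether the ring uses a page pool; roughly:

        struct gve_rx_slot_page_info *page_info = &buf_state->page_info;

        if (rx->dqo.page_pool) {
                /* Internally a no-op for unreadable netmem. */
                page_pool_dma_sync_netmem_for_cpu(rx->dqo.page_pool,
                                                  page_info->netmem,
                                                  page_info->page_offset,
                                                  buf_len);
        } else {
                dma_sync_single_range_for_cpu(&priv->pdev->dev,
                                              buf_state->addr,
                                              page_info->page_offset +
                                              page_info->pad,
                                              buf_len, DMA_FROM_DEVICE);
        }
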
Disable the rx_copybreak optimization when the payload is unreadable
netmem, since the copy needs CPU access to the payload.
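
The copybreak guard then becomes (condensed from the hunk below):

        if (eop && buf_len <= priv->rx_copybreak &&
            !(rx->dqo.page_pool &&
              netmem_is_net_iov(buf_state->page_info.netmem))) {
                /* Payload is CPU-readable, safe to copy into a small skb. */
                rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
                                               &buf_state->page_info,
                                               buf_len);
                ...
        }
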
Signed-off-by: Mina Almasry <almasrymina@...gle.com>
Signed-off-by: Ziwei Xiao <ziweixiao@...gle.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@...gle.com>
---
.../ethernet/google/gve/gve_buffer_mgmt_dqo.c | 5 +++
drivers/net/ethernet/google/gve/gve_rx_dqo.c | 36 ++++++++++++++++---
2 files changed, 36 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 8f5021e59e0a..0e2b703c673a 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -260,6 +260,11 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
.offset = xdp ? XDP_PACKET_HEADROOM : 0,
};
+ if (priv->header_split_enabled) {
+ pp.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rx->q_num;
+ }
+
return page_pool_create(&pp);
}
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 7380c2b7a2d8..8c75a4d1e3e7 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -718,6 +718,24 @@ static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
+static void gve_dma_sync(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state, u16 buf_len)
+{
+ struct gve_rx_slot_page_info *page_info = &buf_state->page_info;
+
+ if (rx->dqo.page_pool) {
+ page_pool_dma_sync_netmem_for_cpu(rx->dqo.page_pool,
+ page_info->netmem,
+ page_info->page_offset,
+ buf_len);
+ } else {
+ dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
+ page_info->page_offset +
+ page_info->pad,
+ buf_len, DMA_FROM_DEVICE);
+ }
+}
+
/* Returns 0 if descriptor is completed successfully.
* Returns -EINVAL if descriptor is invalid.
* Returns -ENOMEM if data cannot be copied to skb.
@@ -793,13 +811,19 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
rx->rx_hsplit_unsplit_pkt += unsplit;
rx->rx_hsplit_bytes += hdr_len;
u64_stats_update_end(&rx->statss);
+ } else if (!rx->ctx.skb_head && rx->dqo.page_pool &&
+ netmem_is_net_iov(buf_state->page_info.netmem)) {
+ /* When header split is disabled, the header lands in the packet
+ * buffer. If the packet buffer is a net_iov, it cannot easily be
+ * mapped into kernel space to access the header needed to
+ * process the packet.
+ */
+ gve_free_buffer(rx, buf_state);
+ return -EFAULT;
}
/* Sync the portion of dma buffer for CPU to read. */
- dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
- buf_state->page_info.page_offset +
- buf_state->page_info.pad,
- buf_len, DMA_FROM_DEVICE);
+ gve_dma_sync(priv, rx, buf_state, buf_len);
/* Append to current skb if one exists. */
if (rx->ctx.skb_head) {
@@ -837,7 +861,9 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
u64_stats_update_end(&rx->statss);
}
- if (eop && buf_len <= priv->rx_copybreak) {
+ if (eop && buf_len <= priv->rx_copybreak &&
+ !(rx->dqo.page_pool &&
+ netmem_is_net_iov(buf_state->page_info.netmem))) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len);
if (unlikely(!rx->ctx.skb_head))
--
2.50.0.727.gbf7dc18ff4-goog