Message-ID: <20250307003905.601175-1-hramamurthy@google.com>
Date: Fri,  7 Mar 2025 00:39:05 +0000
From: Harshitha Ramamurthy <hramamurthy@...gle.com>
To: netdev@...r.kernel.org
Cc: jeroendb@...gle.com, hramamurthy@...gle.com, andrew+netdev@...n.ch, 
	davem@...emloft.net, edumazet@...gle.com, kuba@...nel.org, pabeni@...hat.com, 
	pkaligineedi@...gle.com, shailend@...gle.com, willemb@...gle.com, 
	ziweixiao@...gle.com, jacob.e.keller@...el.com, linux-kernel@...r.kernel.org, 
	Mina Almasry <almasrymina@...gle.com>
Subject: [PATCH net-next] gve: convert to use netmem for DQO RDA mode

To add netmem support to the gve driver, add a union to
struct gve_rx_slot_page_info: netmem_ref is used for the DQO queue
format's raw DMA addressing (RDA) mode, while struct page is retained
for all other use cases.
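
As a rough illustration (not part of this patch), only one union member
is meaningful at a time: the DQO RDA path stores a netmem_ref, and a
struct page can only be recovered from it when the ref is page-backed,
which is what the prefetch change below checks. A minimal sketch, using
the hypothetical helper name example_prefetch_payload():

	#include <linux/prefetch.h>
	#include <net/netmem.h>

	/* Sketch only: a netmem_ref may be backed by a net_iov (e.g. device
	 * memory) instead of a struct page; only page-backed refs have a
	 * struct page the CPU can prefetch.
	 */
	static void example_prefetch_payload(netmem_ref netmem)
	{
		if (!netmem_is_net_iov(netmem))
			prefetch(netmem_to_page(netmem));
	}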

Then, switch to the relevant netmem helper functions for page pool
and skb frag management.
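
As context for the conversion (again not part of the patch), the netmem
helpers used here string together roughly as follows; the function name
example_alloc_and_attach() and its signature are made up for
illustration:

	#include <linux/skbuff.h>
	#include <net/netmem.h>
	#include <net/page_pool/helpers.h>

	/* Sketch: allocate a buffer as a netmem_ref, record its DMA address
	 * for the RX descriptor, and attach it to an skb as a fragment. On
	 * error paths the buffer would instead be returned with
	 * page_pool_put_full_netmem().
	 */
	static int example_alloc_and_attach(struct page_pool *pool,
					    struct sk_buff *skb,
					    dma_addr_t *dma_out)
	{
		unsigned int offset, size = PAGE_SIZE;
		netmem_ref netmem;

		netmem = page_pool_alloc_netmem(pool, &offset, &size, GFP_ATOMIC);
		if (!netmem)
			return -ENOMEM;

		/* base DMA address, as in gve_alloc_from_page_pool() */
		*dma_out = page_pool_get_dma_addr_netmem(netmem);

		/* hand the buffer to the stack, as gve_skb_add_rx_frag() does
		 * on the page pool path
		 */
		skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags, netmem,
				       offset, size, size);
		return 0;
	}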

Reviewed-by: Mina Almasry <almasrymina@...gle.com>
Reviewed-by: Willem de Bruijn <willemb@...gle.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@...gle.com>
---
 drivers/net/ethernet/google/gve/gve.h         |  8 ++++-
 .../ethernet/google/gve/gve_buffer_mgmt_dqo.c | 27 ++++++++-------
 drivers/net/ethernet/google/gve/gve_rx_dqo.c  | 34 ++++++++++++++-----
 3 files changed, 47 insertions(+), 22 deletions(-)

diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 216d6e157bef..483c43bab3a9 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -105,7 +105,13 @@ struct gve_rx_desc_queue {
 
 /* The page info for a single slot in the RX data queue */
 struct gve_rx_slot_page_info {
-	struct page *page;
+	/* netmem is used for DQO RDA mode
+	 * page is used in all other modes
+	 */
+	union {
+		struct page *page;
+		netmem_ref netmem;
+	};
 	void *page_address;
 	u32 page_offset; /* offset to write to in page */
 	unsigned int buf_size;
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 403f0f335ba6..af84cb88f828 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -205,32 +205,33 @@ void gve_free_to_page_pool(struct gve_rx_ring *rx,
 			   struct gve_rx_buf_state_dqo *buf_state,
 			   bool allow_direct)
 {
-	struct page *page = buf_state->page_info.page;
+	netmem_ref netmem = buf_state->page_info.netmem;
 
-	if (!page)
+	if (!netmem)
 		return;
 
-	page_pool_put_full_page(page->pp, page, allow_direct);
-	buf_state->page_info.page = NULL;
+	page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, allow_direct);
+	buf_state->page_info.netmem = 0;
 }
 
 static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
 				    struct gve_rx_buf_state_dqo *buf_state)
 {
 	struct gve_priv *priv = rx->gve;
-	struct page *page;
+	netmem_ref netmem;
 
 	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
-	page = page_pool_alloc(rx->dqo.page_pool,
-			       &buf_state->page_info.page_offset,
-			       &buf_state->page_info.buf_size, GFP_ATOMIC);
+	netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
+					&buf_state->page_info.page_offset,
+					&buf_state->page_info.buf_size,
+					GFP_ATOMIC);
 
-	if (!page)
+	if (!netmem)
 		return -ENOMEM;
 
-	buf_state->page_info.page = page;
-	buf_state->page_info.page_address = page_address(page);
-	buf_state->addr = page_pool_get_dma_addr(page);
+	buf_state->page_info.netmem = netmem;
+	buf_state->page_info.page_address = netmem_address(netmem);
+	buf_state->addr = page_pool_get_dma_addr_netmem(netmem);
 
 	return 0;
 }
@@ -269,7 +270,7 @@ void gve_reuse_buffer(struct gve_rx_ring *rx,
 		      struct gve_rx_buf_state_dqo *buf_state)
 {
 	if (rx->dqo.page_pool) {
-		buf_state->page_info.page = NULL;
+		buf_state->page_info.netmem = 0;
 		gve_free_buf_state(rx, buf_state);
 	} else {
 		gve_dec_pagecnt_bias(&buf_state->page_info);
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index f0674a443567..856ade0c209f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -476,6 +476,24 @@ static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
 	return 0;
 }
 
+static void gve_skb_add_rx_frag(struct gve_rx_ring *rx,
+				struct gve_rx_buf_state_dqo *buf_state,
+				int num_frags, u16 buf_len)
+{
+	if (rx->dqo.page_pool) {
+		skb_add_rx_frag_netmem(rx->ctx.skb_tail, num_frags,
+				       buf_state->page_info.netmem,
+				       buf_state->page_info.page_offset,
+				       buf_len,
+				       buf_state->page_info.buf_size);
+	} else {
+		skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
+				buf_state->page_info.page,
+				buf_state->page_info.page_offset,
+				buf_len, buf_state->page_info.buf_size);
+	}
+}
+
 /* Chains multi skbs for single rx packet.
  * Returns 0 if buffer is appended, -1 otherwise.
  */
@@ -513,10 +531,7 @@ static int gve_rx_append_frags(struct napi_struct *napi,
 	if (gve_rx_should_trigger_copy_ondemand(rx))
 		return gve_rx_copy_ondemand(rx, buf_state, buf_len);
 
-	skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
-			buf_state->page_info.page,
-			buf_state->page_info.page_offset,
-			buf_len, buf_state->page_info.buf_size);
+	gve_skb_add_rx_frag(rx, buf_state, num_frags, buf_len);
 	gve_reuse_buffer(rx, buf_state);
 	return 0;
 }
@@ -561,7 +576,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 	/* Page might have not been used for awhile and was likely last written
 	 * by a different thread.
 	 */
-	prefetch(buf_state->page_info.page);
+	if (rx->dqo.page_pool) {
+		if (!netmem_is_net_iov(buf_state->page_info.netmem))
+			prefetch(netmem_to_page(buf_state->page_info.netmem));
+	} else {
+		prefetch(buf_state->page_info.page);
+	}
 
 	/* Copy the header into the skb in the case of header split */
 	if (hsplit) {
@@ -632,9 +652,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 	if (rx->dqo.page_pool)
 		skb_mark_for_recycle(rx->ctx.skb_head);
 
-	skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
-			buf_state->page_info.page_offset, buf_len,
-			buf_state->page_info.buf_size);
+	gve_skb_add_rx_frag(rx, buf_state, 0, buf_len);
 	gve_reuse_buffer(rx, buf_state);
 	return 0;
 
-- 
2.49.0.rc0.332.g42c0ae87b1-goog

