Message-ID: <154413875746.21735.15285083274392253125.stgit@firesoul>
Date:   Fri, 07 Dec 2018 00:25:57 +0100
From:   Jesper Dangaard Brouer <brouer@...hat.com>
To:     netdev@...r.kernel.org, "David S. Miller" <davem@...emloft.net>,
        Jesper Dangaard Brouer <brouer@...hat.com>
Cc:     Toke Høiland-Jørgensen <toke@...e.dk>,
        ard.biesheuvel@...aro.org, Jason Wang <jasowang@...hat.com>,
        ilias.apalodimas@...aro.org,
        Björn Töpel <bjorn.topel@...el.com>,
        w@....eu, Saeed Mahameed <saeedm@...lanox.com>,
        mykyta.iziumtsev@...il.com,
        Daniel Borkmann <borkmann@...earbox.net>,
        Alexei Starovoitov <alexei.starovoitov@...il.com>,
        Tariq Toukan <tariqt@...lanox.com>
Subject: [net-next PATCH RFC 6/8] mvneta: activate page recycling via skb
 using page_pool

Previous mvneta patches have already started to use page_pool, but
this was primarily for the RX page-alloc side and for doing DMA
map/unmap handling.  Pages traveling through the netstack were
unmapped and returned through the normal page allocator.
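For context, that alloc-side setup amounts to creating one pool per RX
queue and letting the pool own the DMA mapping.  A minimal sketch,
assuming the page_pool API as it stands in this series (the sizing and
the internals of mvneta_create_page_pool() from the earlier patch are
illustrative here, not the exact code):

  struct page_pool_params pp_params = {
          .order     = 0,                   /* one page per RX buffer */
          .flags     = PP_FLAG_DMA_MAP,     /* pool DMA-maps its pages */
          .pool_size = num,                 /* illustrative: per RX desc */
          .nid       = NUMA_NO_NODE,
          .dev       = pp->dev->dev.parent,
          .dma_dir   = DMA_FROM_DEVICE,
  };

  rxq->page_pool = page_pool_create(&pp_params);
  if (IS_ERR(rxq->page_pool)) {
          int err = PTR_ERR(rxq->page_pool);

          rxq->page_pool = NULL;
          return err;
  }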

It is now time to activate the recycling of pages back into the pool.
This involves registering the page_pool with the XDP rxq memory model
API, even though the driver doesn't support XDP itself yet, and simply
updating the skb->mem_info field with info from the xdp_rxq_info.
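Condensed, the sequence the hunks below wire up looks like this (error
paths omitted in this sketch; skb->mem_info is the field introduced
earlier in this RFC series):

  /* at RX queue init: register the rxq and its memory model */
  err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
  err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
                                   rxq->page_pool);

  /* per received packet: tag the SKB so the return path can find
   * the originating page_pool
   */
  rxq->skb->mem_info = rxq->xdp_rxq.mem;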

Signed-off-by: Jesper Dangaard Brouer <brouer@...hat.com>
Signed-off-by: Ilias Apalodimas <ilias.apalodimas@...aro.org>
---
 drivers/net/ethernet/marvell/mvneta.c |   29 +++++++++++++++++++++++++----
 1 file changed, 25 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 78f1fcdc1f00..449c19829d67 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -628,6 +628,9 @@ struct mvneta_rx_queue {
 	/* page pool */
 	struct page_pool *page_pool;
 
+	/* XDP */
+	struct xdp_rxq_info xdp_rxq;
+
 	/* error counters */
 	u32 skb_alloc_err;
 	u32 refill_err;
@@ -1892,6 +1895,9 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 		page_pool_put_page(rxq->page_pool, data, false);
 	}
 
+	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+		xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
 	if (rxq->page_pool)
 		page_pool_destroy(rxq->page_pool);
 }
@@ -1978,11 +1984,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 
 			rx_desc->buf_phys_addr = 0;
 			frag_num = 0;
+			rxq->skb->mem_info = rxq->xdp_rxq.mem;
 			skb_reserve(rxq->skb, MVNETA_MH_SIZE + NET_SKB_PAD);
 			skb_put(rxq->skb, rx_bytes < PAGE_SIZE ? rx_bytes :
 				PAGE_SIZE);
 			mvneta_rx_csum(pp, rx_status, rxq->skb);
-			page_pool_unmap_page(rxq->page_pool, page);
 			rxq->left_size = rx_bytes < PAGE_SIZE ? 0 : rx_bytes -
 				PAGE_SIZE;
 		} else {
@@ -2001,7 +2007,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 				skb_add_rx_frag(rxq->skb, frag_num, page,
 						0, frag_size,
 						PAGE_SIZE);
-
+				/* skb frags[] are not recycled, unmap now */
 				page_pool_unmap_page(rxq->page_pool, page);
 
 				rxq->left_size -= frag_size;
@@ -2815,10 +2821,25 @@ static int mvneta_create_page_pool(struct mvneta_port *pp,
 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 			   int num)
 {
-	int i = 0;
+	int err, i = 0;
+
+	err = mvneta_create_page_pool(pp, rxq, num);
+	if (err)
+		goto out;
 
-	if (mvneta_create_page_pool(pp, rxq, num))
+	err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
+	if (err) {
+		page_pool_destroy(rxq->page_pool);
+		goto out;
+	}
+
+	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+					 rxq->page_pool);
+	if (err) {
+		xdp_rxq_info_unreg(&rxq->xdp_rxq);
+		page_pool_destroy(rxq->page_pool);
 		goto out;
+	}
 
 	for (i = 0; i < num; i++) {
 		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
