Date:   Fri, 07 Dec 2018 00:25:37 +0100
From:   Jesper Dangaard Brouer <brouer@...hat.com>
To:     netdev@...r.kernel.org, "David S. Miller" <davem@...emloft.net>,
        Jesper Dangaard Brouer <brouer@...hat.com>
Cc:     Toke Høiland-Jørgensen <toke@...e.dk>,
        ard.biesheuvel@...aro.org, Jason Wang <jasowang@...hat.com>,
        ilias.apalodimas@...aro.org,
        Björn Töpel <bjorn.topel@...el.com>,
        w@....eu, Saeed Mahameed <saeedm@...lanox.com>,
        mykyta.iziumtsev@...il.com,
        Daniel Borkmann <borkmann@...earbox.net>,
        Alexei Starovoitov <alexei.starovoitov@...il.com>,
        Tariq Toukan <tariqt@...lanox.com>
Subject: [net-next PATCH RFC 2/8] net: mvneta: use page pool API for sw
 buffer manager

From: Ilias Apalodimas <ilias.apalodimas@...aro.org>

Use the page_pool API for allocations and DMA handling instead of
__dev_alloc_page()/dma_map_page() and __free_page()/dma_unmap_page().

The page_pool API offers buffer recycling capabilities for XDP but
allocates one page per packet, unless the driver splits and manages
the allocated page.
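
As a sketch (illustrative only, not part of the diff), the pattern the
driver moves to looks roughly like this; the pool parameters mirror
mvneta_create_page_pool() further down, with error handling trimmed, and
"size" and "dev" standing in for the queue depth and the DMA device:

	struct page_pool_params pp_params = {
		.order		= 0,		/* one page per buffer */
		.flags		= PP_FLAG_DMA_MAP, /* pool does dma_map_page() */
		.pool_size	= size,		/* e.g. number of RX descriptors */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,		/* device performing the DMA */
		.dma_dir	= DMA_FROM_DEVICE,
	};
	struct page_pool *pool;
	struct page *page;
	dma_addr_t dma;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	/* RX refill: replaces __dev_alloc_page() + dma_map_page() */
	page = page_pool_dev_alloc_pages(pool);
	if (!page)
		return -ENOMEM;
	dma = page_pool_get_dma_addr(page);	/* already mapped by the pool */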

Although the driver does not support XDP yet, the current implementation
already allocates one page per packet, so there is no performance penalty
from using the API.

For now, pages are unmapped via page_pool_unmap_page() before packets
travel into the network stack, as the stack does not have a return hook
yet. Since this call clears the page_pool state, it is safe to let the
page be returned to the normal page allocator.
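
Condensed from the RX path in the diff (illustrative fragment only):

	/* attach the page to the skb, then detach it from the pool */
	skb_add_rx_frag(rxq->skb, frag_num, page, frag_offset,
			frag_size, PAGE_SIZE);
	page_pool_unmap_page(rxq->page_pool, page);
	/* the skb now owns the page: on kfree_skb()/consume_skb() it is
	 * put_page()'d back to the page allocator, not recycled into
	 * the pool
	 */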

Signed-off-by: Ilias Apalodimas <ilias.apalodimas@...aro.org>
Signed-off-by: Jesper Dangaard Brouer <brouer@...hat.com>
---
 drivers/net/ethernet/marvell/Kconfig  |    1 +
 drivers/net/ethernet/marvell/mvneta.c |   56 ++++++++++++++++++++++++---------
 2 files changed, 41 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 3238aa7f5dac..3325abe67465 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -60,6 +60,7 @@ config MVNETA
 	depends on ARCH_MVEBU || COMPILE_TEST
 	select MVMDIO
 	select PHYLINK
+	select PAGE_POOL
 	---help---
 	  This driver supports the network interface units in the
 	  Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5bfd349bf41a..2354421fe96f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -33,6 +33,7 @@
 #include <linux/skbuff.h>
 #include <net/hwbm.h>
 #include "mvneta_bm.h"
+#include <net/page_pool.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/tso.h>
@@ -624,6 +625,9 @@ struct mvneta_rx_queue {
 	struct sk_buff *skb;
 	int left_size;
 
+	/* page pool */
+	struct page_pool *page_pool;
+
 	/* error counters */
 	u32 skb_alloc_err;
 	u32 refill_err;
@@ -1813,17 +1817,11 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
 	dma_addr_t phys_addr;
 	struct page *page;
 
-	page = __dev_alloc_page(gfp_mask);
+	page = page_pool_dev_alloc_pages(rxq->page_pool);
 	if (!page)
 		return -ENOMEM;
 
-	/* map page for use */
-	phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
-				 DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
-		__free_page(page);
-		return -ENOMEM;
-	}
+	phys_addr = page_pool_get_dma_addr(page);
 
 	phys_addr += pp->rx_offset_correction;
 	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
@@ -1892,10 +1890,11 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 		if (!data || !(rx_desc->buf_phys_addr))
 			continue;
 
-		dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
-		__free_page(data);
+		page_pool_put_page(rxq->page_pool, data, false);
 	}
+
+	if (rxq->page_pool)
+		page_pool_destroy(rxq->page_pool);
 }
 
 static inline
@@ -2010,8 +2009,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 				skb_add_rx_frag(rxq->skb, frag_num, page,
 						frag_offset, frag_size,
 						PAGE_SIZE);
-				dma_unmap_page(dev->dev.parent, phys_addr,
-					       PAGE_SIZE, DMA_FROM_DEVICE);
+				page_pool_unmap_page(rxq->page_pool, page);
 				rxq->left_size -= frag_size;
 			}
 		} else {
@@ -2041,8 +2039,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 						frag_offset, frag_size,
 						PAGE_SIZE);
 
-				dma_unmap_page(dev->dev.parent, phys_addr,
-					       PAGE_SIZE, DMA_FROM_DEVICE);
+				page_pool_unmap_page(rxq->page_pool, page);
 
 				rxq->left_size -= frag_size;
 			}
@@ -2828,11 +2825,37 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 	return rx_done;
 }
 
+static int mvneta_create_page_pool(struct mvneta_port *pp,
+				   struct mvneta_rx_queue *rxq, int num)
+{
+	struct page_pool_params pp_params = { 0 };
+	int err = 0;
+
+	pp_params.order = 0;
+	/* internal DMA mapping in page_pool */
+	pp_params.flags = PP_FLAG_DMA_MAP;
+	pp_params.pool_size = num;
+	pp_params.nid = NUMA_NO_NODE;
+	pp_params.dev = pp->dev->dev.parent;
+	pp_params.dma_dir = DMA_FROM_DEVICE;
+
+	rxq->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(rxq->page_pool)) {
+		err = PTR_ERR(rxq->page_pool);
+		rxq->page_pool = NULL;
+	}
+
+	return err;
+}
+
 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 			   int num)
 {
-	int i;
+	int i = 0;
+
+	if (mvneta_create_page_pool(pp, rxq, num))
+		goto out;
 
 	for (i = 0; i < num; i++) {
 		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
@@ -2848,6 +2871,7 @@ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	/* Add this number of RX descriptors as non occupied (ready to
 	 * get packets)
 	 */
+out:
 	mvneta_rxq_non_occup_desc_add(pp, rxq, i);
 
 	return i;
