Message-ID: <1463626996-51176-1-git-send-email-puck.chen@hisilicon.com>
Date:	Thu, 19 May 2016 11:03:16 +0800
From:	Chen Feng <puck.chen@...ilicon.com>
To:	<puck.chen@...ilicon.com>, <gregkh@...uxfoundation.org>,
	<arve@...roid.com>, <riandrews@...roid.com>, <labbott@...hat.com>,
	<paul.gortmaker@...driver.com>, <bmarsh94@...il.com>
CC:	<devel@...verdev.osuosl.org>, <linux-kernel@...r.kernel.org>,
	<puck.chen@...mail.com>, <dan.zhao@...ilicon.com>,
	<suzhuangluan@...ilicon.com>, <qijiwen@...ilicon.com>,
	<oliver.fu@...ilicon.com>, <xuyiping@...ilicon.com>,
	<saberlily.xia@...ilicon.com>
Subject: [PATCH v2] ION: Sys_heap: Add cached pool to speed up cached buffer alloc

Add a cached page pool to the ION system heap. This patch adds a
pool for cached buffers alongside the existing uncached pools, which
greatly improves allocation time for cached buffers.

Under memory pressure, allocating 800MB from userspace with iontest
averages 577ms with this patch; without it the average is about 883ms.

v1: Zero cached buffers before they are returned to the pool.
v2: Add a cached flag to the pool so a fresh allocation knows whether
    it needs a cache flush.
    Rework the shrink function.
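
A minimal standalone sketch of the idea (plain userspace C with stub
helpers and simplified types, not the kernel APIs; the real
implementation is in the diff below): keep separate per-order pools for
cached and uncached buffers, and cache-clean only pages handed out from
the cached pools, since uncached pool pages were already synced for the
device when the pool was filled.

#include <stdbool.h>
#include <stdio.h>

#define NUM_ORDERS 3
static const unsigned int orders[NUM_ORDERS] = { 8, 4, 0 };

struct pool {
	unsigned int order;
	bool cached;
};

/* one pool per order for uncached buffers, one per order for cached */
static struct pool uncached_pools[NUM_ORDERS];
static struct pool cached_pools[NUM_ORDERS];

static void cache_clean(const struct pool *p)
{
	/* stand-in for ion_pages_sync_for_device() in the patch */
	printf("cache clean for order-%u page\n", p->order);
}

static struct pool *alloc_buffer_page(bool cached, int index)
{
	struct pool *p = cached ? &cached_pools[index] : &uncached_pools[index];

	/*
	 * Uncached pool pages were synced for DMA when the pool was
	 * refilled, so only cached allocations need a clean here.
	 */
	if (cached)
		cache_clean(p);
	return p;
}

int main(void)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		uncached_pools[i].order = orders[i];
		cached_pools[i].order = orders[i];
		cached_pools[i].cached = true;
	}
	alloc_buffer_page(true, 0);	/* cached: cleaned at alloc time */
	alloc_buffer_page(false, 0);	/* uncached: no clean needed */
	return 0;
}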

Signed-off-by: Chen Feng <puck.chen@...ilicon.com>
Signed-off-by: Xia Qing <saberlily.xia@...ilicon.com>
Reviewed-by: Fu Jun <oliver.fu@...ilicon.com>
---
 drivers/staging/android/ion/ion_page_pool.c   |  10 +-
 drivers/staging/android/ion/ion_priv.h        |   5 +-
 drivers/staging/android/ion/ion_system_heap.c | 183 +++++++++++++++++---------
 3 files changed, 133 insertions(+), 65 deletions(-)

diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 1fe8016..2c5e5c5 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -30,8 +30,9 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 
 	if (!page)
 		return NULL;
-	ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
-						DMA_BIDIRECTIONAL);
+	if (!pool->cached)
+		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+					  DMA_BIDIRECTIONAL);
 	return page;
 }
 
@@ -147,7 +148,8 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 	return freed;
 }
 
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
+					   bool cached)
 {
 	struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
 
@@ -161,6 +163,8 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
 	pool->order = order;
 	mutex_init(&pool->mutex);
 	plist_node_init(&pool->list, order);
+	if (cached)
+		pool->cached = true;
 
 	return pool;
 }
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index 0239883..f3cb8b4 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -360,6 +360,7 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
  * @gfp_mask:		gfp_mask to use from alloc
  * @order:		order of pages in the pool
  * @list:		plist node for list of pools
+ * @cached:		whether this pool holds cached pages
  *
  * Allows you to keep a pool of pre allocated pages to use from your heap.
  * Keeping a pool of pages that is ready for dma, ie any cached mapping have
@@ -369,6 +370,7 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
 struct ion_page_pool {
 	int high_count;
 	int low_count;
+	bool cached;
 	struct list_head high_items;
 	struct list_head low_items;
 	struct mutex mutex;
@@ -377,7 +379,8 @@ struct ion_page_pool {
 	struct plist_node list;
 };
 
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
+					   bool cached);
 void ion_page_pool_destroy(struct ion_page_pool *);
 struct page *ion_page_pool_alloc(struct ion_page_pool *);
 void ion_page_pool_free(struct ion_page_pool *, struct page *);
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index b69dfc7..32a0ebf 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -26,16 +26,18 @@
 #include "ion.h"
 #include "ion_priv.h"
 
+#define NUM_ORDERS ARRAY_SIZE(orders)
+
 static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
 				     __GFP_NORETRY) & ~__GFP_RECLAIM;
 static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
 static const unsigned int orders[] = {8, 4, 0};
-static const int num_orders = ARRAY_SIZE(orders);
+
 static int order_to_index(unsigned int order)
 {
 	int i;
 
-	for (i = 0; i < num_orders; i++)
+	for (i = 0; i < NUM_ORDERS; i++)
 		if (order == orders[i])
 			return i;
 	BUG();
@@ -49,47 +51,55 @@ static inline unsigned int order_to_size(int order)
 
 struct ion_system_heap {
 	struct ion_heap heap;
-	struct ion_page_pool *pools[0];
+	struct ion_page_pool *uncached_pools[NUM_ORDERS];
+	struct ion_page_pool *cached_pools[NUM_ORDERS];
 };
 
+/*
+ * Pages from the page pools are already zeroed. Cached buffers still
+ * need a cache clean at alloc time; uncached buffers were synced for
+ * the device when the pool was filled, so they need nothing more.
+ */
 static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 				      struct ion_buffer *buffer,
 				      unsigned long order)
 {
 	bool cached = ion_buffer_cached(buffer);
-	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+	struct ion_page_pool *pool;
 	struct page *page;
 
-	if (!cached) {
-		page = ion_page_pool_alloc(pool);
-	} else {
-		gfp_t gfp_flags = low_order_gfp_flags;
+	if (!cached)
+		pool = heap->uncached_pools[order_to_index(order)];
+	else
+		pool = heap->cached_pools[order_to_index(order)];
 
-		if (order > 4)
-			gfp_flags = high_order_gfp_flags;
-		page = alloc_pages(gfp_flags | __GFP_COMP, order);
-		if (!page)
-			return NULL;
-		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
-						DMA_BIDIRECTIONAL);
-	}
+	page = ion_page_pool_alloc(pool);
 
+	if (cached)
+		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+					  DMA_BIDIRECTIONAL);
 	return page;
 }
 
 static void free_buffer_page(struct ion_system_heap *heap,
 			     struct ion_buffer *buffer, struct page *page)
 {
+	struct ion_page_pool *pool;
 	unsigned int order = compound_order(page);
 	bool cached = ion_buffer_cached(buffer);
 
-	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
-		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
-
-		ion_page_pool_free(pool, page);
-	} else {
+	/* free straight back to the system */
+	if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
 		__free_pages(page, order);
+		return;
 	}
+
+	if (!cached)
+		pool = heap->uncached_pools[order_to_index(order)];
+	else
+		pool = heap->cached_pools[order_to_index(order)];
+
+	ion_page_pool_free(pool, page);
 }
 
 
@@ -101,7 +111,7 @@ static struct page *alloc_largest_available(struct ion_system_heap *heap,
 	struct page *page;
 	int i;
 
-	for (i = 0; i < num_orders; i++) {
+	for (i = 0; i < NUM_ORDERS; i++) {
 		if (size < order_to_size(orders[i]))
 			continue;
 		if (max_order < orders[i])
@@ -181,16 +191,11 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
 							struct ion_system_heap,
 							heap);
 	struct sg_table *table = buffer->sg_table;
-	bool cached = ion_buffer_cached(buffer);
 	struct scatterlist *sg;
 	int i;
 
-	/*
-	 *  uncached pages come from the page pools, zero them before returning
-	 *  for security purposes (other allocations are zerod at
-	 *  alloc time
-	 */
-	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
+	/* zero the buffer before it goes back to the page pool */
+	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
 		ion_heap_buffer_zero(buffer);
 
 	for_each_sg(table->sgl, sg, table->nents, i)
@@ -213,6 +218,8 @@ static void ion_system_heap_unmap_dma(struct ion_heap *heap,
 static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 					int nr_to_scan)
 {
+	struct ion_page_pool *uncached_pool;
+	struct ion_page_pool *cached_pool;
 	struct ion_system_heap *sys_heap;
 	int nr_total = 0;
 	int i, nr_freed;
@@ -223,20 +230,35 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 	if (!nr_to_scan)
 		only_scan = 1;
 
-	for (i = 0; i < num_orders; i++) {
-		struct ion_page_pool *pool = sys_heap->pools[i];
-
-		nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
-		nr_total += nr_freed;
-
-		if (!only_scan) {
+	for (i = 0; i < NUM_ORDERS; i++) {
+		uncached_pool = sys_heap->uncached_pools[i];
+		cached_pool = sys_heap->cached_pools[i];
+
+		if (only_scan) {
+			nr_total += ion_page_pool_shrink(uncached_pool,
+							 gfp_mask,
+							 nr_to_scan);
+
+			nr_total += ion_page_pool_shrink(cached_pool,
+							 gfp_mask,
+							 nr_to_scan);
+		} else {
+			nr_freed = ion_page_pool_shrink(uncached_pool,
+							gfp_mask,
+							nr_to_scan);
 			nr_to_scan -= nr_freed;
-			/* shrink completed */
+			nr_total += nr_freed;
+			if (nr_to_scan <= 0)
+				break;
+			nr_freed = ion_page_pool_shrink(cached_pool,
+							gfp_mask,
+							nr_to_scan);
+			nr_to_scan -= nr_freed;
+			nr_total += nr_freed;
 			if (nr_to_scan <= 0)
 				break;
 		}
 	}
-
 	return nr_total;
 }
 
@@ -259,52 +281,89 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
 							struct ion_system_heap,
 							heap);
 	int i;
+	struct ion_page_pool *pool;
 
-	for (i = 0; i < num_orders; i++) {
-		struct ion_page_pool *pool = sys_heap->pools[i];
+	for (i = 0; i < NUM_ORDERS; i++) {
+		pool = sys_heap->uncached_pools[i];
 
-		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+		seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
 			   pool->high_count, pool->order,
 			   (PAGE_SIZE << pool->order) * pool->high_count);
-		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+		seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
+			   pool->low_count, pool->order,
+			   (PAGE_SIZE << pool->order) * pool->low_count);
+	}
+
+	for (i = 0; i < NUM_ORDERS; i++) {
+		pool = sys_heap->cached_pools[i];
+
+		seq_printf(s, "%d order %u highmem pages cached %lu total\n",
+			   pool->high_count, pool->order,
+			   (PAGE_SIZE << pool->order) * pool->high_count);
+		seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
 			   pool->low_count, pool->order,
 			   (PAGE_SIZE << pool->order) * pool->low_count);
 	}
 	return 0;
 }
 
+static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
+{
+	int i;
+
+	for (i = 0; i < NUM_ORDERS; i++)
+		if (pools[i])
+			ion_page_pool_destroy(pools[i]);
+}
+
+static int ion_system_heap_create_pools(struct ion_page_pool **pools,
+					bool cached)
+{
+	int i;
+	gfp_t gfp_flags = low_order_gfp_flags;
+
+	for (i = 0; i < NUM_ORDERS; i++) {
+		struct ion_page_pool *pool;
+
+		if (orders[i] > 4)
+			gfp_flags = high_order_gfp_flags;
+
+		pool = ion_page_pool_create(gfp_flags, orders[i], cached);
+		if (!pool)
+			goto err_create_pool;
+		pools[i] = pool;
+	}
+	return 0;
+
+err_create_pool:
+	ion_system_heap_destroy_pools(pools);
+	return -ENOMEM;
+}
+
 struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
 {
 	struct ion_system_heap *heap;
-	int i;
 
-	heap = kzalloc(sizeof(struct ion_system_heap) +
-			sizeof(struct ion_page_pool *) * num_orders,
-			GFP_KERNEL);
+	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 	if (!heap)
 		return ERR_PTR(-ENOMEM);
 	heap->heap.ops = &system_heap_ops;
 	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
 	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
 
-	for (i = 0; i < num_orders; i++) {
-		struct ion_page_pool *pool;
-		gfp_t gfp_flags = low_order_gfp_flags;
+	if (ion_system_heap_create_pools(heap->uncached_pools, false))
+		goto free_heap;
 
-		if (orders[i] > 4)
-			gfp_flags = high_order_gfp_flags;
-		pool = ion_page_pool_create(gfp_flags, orders[i]);
-		if (!pool)
-			goto destroy_pools;
-		heap->pools[i] = pool;
-	}
+	if (ion_system_heap_create_pools(heap->cached_pools, true))
+		goto destroy_uncached_pools;
 
 	heap->heap.debug_show = ion_system_heap_debug_show;
 	return &heap->heap;
 
-destroy_pools:
-	while (i--)
-		ion_page_pool_destroy(heap->pools[i]);
+destroy_uncached_pools:
+	ion_system_heap_destroy_pools(heap->uncached_pools);
+
+free_heap:
 	kfree(heap);
 	return ERR_PTR(-ENOMEM);
 }
@@ -316,8 +375,10 @@ void ion_system_heap_destroy(struct ion_heap *heap)
 							heap);
 	int i;
 
-	for (i = 0; i < num_orders; i++)
-		ion_page_pool_destroy(sys_heap->pools[i]);
+	for (i = 0; i < NUM_ORDERS; i++) {
+		ion_page_pool_destroy(sys_heap->uncached_pools[i]);
+		ion_page_pool_destroy(sys_heap->cached_pools[i]);
+	}
 	kfree(sys_heap);
 }
 
-- 
1.9.1
