Date:	Tue, 10 May 2016 15:48:37 -0700
From:	Laura Abbott <labbott@...hat.com>
To:	Chen Feng <puck.chen@...ilicon.com>, saberlily.xia@...ilicon.com,
	gregkh@...uxfoundation.org, arve@...roid.com,
	riandrews@...roid.com, paul.gortmaker@...driver.com,
	bmarsh94@...il.com, devel@...verdev.osuosl.org,
	linux-kernel@...r.kernel.org
Cc:	xuyiping@...ilicon.com, suzhuangluan@...ilicon.com,
	dan.zhao@...ilicon.com, oliver.fu@...ilicon.com,
	linuxarm@...wei.com, puck.chen@...mail.com
Subject: Re: [PATCH v1] ION: Sys_heap: Add cached pool to speed up cached
 buffer alloc

On 05/09/2016 04:35 AM, Chen Feng wrote:
> Add a cached pool to the ION system heap. This patch adds a cached
> pool alongside the existing uncached pool, which greatly speeds up
> allocation of cached buffers.
>

Can you give some benchmark numbers here?

> v1: Zero cached buffers before they go back to the pool
>
> Signed-off-by: Chen Feng <puck.chen@...ilicon.com>
> Signed-off-by: Xia Qing <saberlily.xia@...ilicon.com>
> Reviewed-by: Fu Jun <oliver.fu@...ilicon.com>
> ---
>  drivers/staging/android/ion/ion_system_heap.c | 155 +++++++++++++++++---------
>  1 file changed, 103 insertions(+), 52 deletions(-)
>
> diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
> index b69dfc7..4b14a0b 100644
> --- a/drivers/staging/android/ion/ion_system_heap.c
> +++ b/drivers/staging/android/ion/ion_system_heap.c
> @@ -49,47 +49,55 @@ static inline unsigned int order_to_size(int order)
>
>  struct ion_system_heap {
>  	struct ion_heap heap;
> -	struct ion_page_pool *pools[0];
> +	struct ion_page_pool *uncached_pools[0];
> +	struct ion_page_pool *cached_pools[0];
>  };
>


I don't think this is correct based on https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
and some discussions with others. Their tests showed this may just result in the
two struct members aliasing, which is not what we want.
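
To illustrate (a made-up example, not code from the patch): because a
zero-length array contributes no size, two of them at the end of a
struct land at the same offset:

	struct foo {
		struct ion_heap heap;
		struct ion_page_pool *a[0];
		struct ion_page_pool *b[0];
	};

	/*
	 * sizeof(a) == 0, so offsetof(struct foo, a) ==
	 * offsetof(struct foo, b), and a[i] and b[i] end up
	 * naming the same storage.
	 */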

The flexible array isn't really necessary here anyway. The number of
orders is known at compile time, and

-static const int num_orders = ARRAY_SIZE(orders);
+
+#define NUM_ORDERS     ARRAY_SIZE(orders)
+

should let NUM_ORDERS be used as a constant array size in the struct.
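
With that in place the struct can simply use fixed-size arrays
(untested sketch):

	struct ion_system_heap {
		struct ion_heap heap;
		struct ion_page_pool *uncached_pools[NUM_ORDERS];
		struct ion_page_pool *cached_pools[NUM_ORDERS];
	};

and the kzalloc() in ion_system_heap_create() no longer needs the
trailing "* num_orders * 2" over-allocation:

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);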

> +/*
> + * Pages coming from the page pool were all zeroed earlier, so we
> + * need a cache clean when handing them out as a cached buffer.
> + * Uncached buffers have been non-cached ever since allocation, so
> + * they need no clean here.
> + */
>  static struct page *alloc_buffer_page(struct ion_system_heap *heap,
>  				      struct ion_buffer *buffer,
>  				      unsigned long order)
>  {
>  	bool cached = ion_buffer_cached(buffer);
> -	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
> +	struct ion_page_pool *pool;
>  	struct page *page;
>
> -	if (!cached) {
> -		page = ion_page_pool_alloc(pool);
> -	} else {
> -		gfp_t gfp_flags = low_order_gfp_flags;
> +	if (!cached)
> +		pool = heap->uncached_pools[order_to_index(order)];
> +	else
> +		pool = heap->cached_pools[order_to_index(order)];
>
> -		if (order > 4)
> -			gfp_flags = high_order_gfp_flags;
> -		page = alloc_pages(gfp_flags | __GFP_COMP, order);
> -		if (!page)
> -			return NULL;
> -		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
> -						DMA_BIDIRECTIONAL);
> -	}
> +	page = ion_page_pool_alloc(pool);
>
> +	if (cached)
> +		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
> +					  DMA_BIDIRECTIONAL);
>  	return page;
>  }
>

This does an extra sync for newly allocated pages. If the page was
only just allocated, rather than coming out of the pool, the sync
should be skipped.
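
Something along these lines could work (a rough sketch only; the
from_pool out-parameter is hypothetical and ion_page_pool_alloc()
would have to grow it):

	bool from_pool;

	page = ion_page_pool_alloc(pool, &from_pool);
	if (!page)
		return NULL;

	/*
	 * Only pages recycled from the pool need the extra cache
	 * clean; pages fresh from the allocator can skip it.
	 */
	if (cached && from_pool)
		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
					  DMA_BIDIRECTIONAL);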

>  static void free_buffer_page(struct ion_system_heap *heap,
>  			     struct ion_buffer *buffer, struct page *page)
>  {
> +	struct ion_page_pool *pool;
>  	unsigned int order = compound_order(page);
>  	bool cached = ion_buffer_cached(buffer);
>
> -	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
> -		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
> -
> -		ion_page_pool_free(pool, page);
> -	} else {
> +	/* free straight back to the system allocator */
> +	if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
>  		__free_pages(page, order);
> +		return;
>  	}
> +
> +	if (!cached)
> +		pool = heap->uncached_pools[order_to_index(order)];
> +	else
> +		pool = heap->cached_pools[order_to_index(order)];
> +
> +	ion_page_pool_free(pool, page);
>  }
>
>
> @@ -181,16 +189,11 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
>  							struct ion_system_heap,
>  							heap);
>  	struct sg_table *table = buffer->sg_table;
> -	bool cached = ion_buffer_cached(buffer);
>  	struct scatterlist *sg;
>  	int i;
>
> -	/*
> -	 *  uncached pages come from the page pools, zero them before returning
> -	 *  for security purposes (other allocations are zerod at
> -	 *  alloc time
> -	 */
> -	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
> +	/* zero the buffer before it goes back to the page pool */
> +	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
>  		ion_heap_buffer_zero(buffer);
>
>  	for_each_sg(table->sgl, sg, table->nents, i)
> @@ -224,19 +227,29 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
>  		only_scan = 1;
>
>  	for (i = 0; i < num_orders; i++) {
> -		struct ion_page_pool *pool = sys_heap->pools[i];
> -
> -		nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
> -		nr_total += nr_freed;
> +		struct ion_page_pool *pool = sys_heap->uncached_pools[i];
>
>  		if (!only_scan) {
> +			nr_freed = ion_page_pool_shrink(pool, gfp_mask,
> +							nr_to_scan);
>  			nr_to_scan -= nr_freed;
> -			/* shrink completed */
> +			nr_total += nr_freed;
> +			pool = sys_heap->cached_pools[i];
> +			nr_freed = ion_page_pool_shrink(pool, gfp_mask,
> +							nr_to_scan);
> +			nr_to_scan -= nr_freed;
> +			nr_total += nr_freed;
>  			if (nr_to_scan <= 0)
>  				break;
> +		} else {
> +			nr_freed = ion_page_pool_shrink(pool, gfp_mask,
> +							nr_to_scan);
> +			nr_total += nr_freed;
> +			nr_freed = ion_page_pool_shrink(sys_heap->cached_pools[i],
> +							gfp_mask, nr_to_scan);
> +			nr_total += nr_freed;
>  		}
>  	}
> -
>  	return nr_total;
>  }
>
> @@ -259,27 +272,69 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
>  							struct ion_system_heap,
>  							heap);
>  	int i;
> +	struct ion_page_pool *pool;
>
>  	for (i = 0; i < num_orders; i++) {
> -		struct ion_page_pool *pool = sys_heap->pools[i];
> +		pool = sys_heap->uncached_pools[i];
>
> -		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
> +		seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
>  			   pool->high_count, pool->order,
>  			   (PAGE_SIZE << pool->order) * pool->high_count);
> -		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
> +		seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
>  			   pool->low_count, pool->order,
>  			   (PAGE_SIZE << pool->order) * pool->low_count);
>  	}
> +
> +	for (i = 0; i < num_orders; i++) {
> +		pool = sys_heap->cached_pools[i];
> +
> +		seq_printf(s, "%d order %u highmem pages cached %lu total\n",
> +			   pool->high_count, pool->order,
> +			   (PAGE_SIZE << pool->order) * pool->high_count);
> +		seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
> +			   pool->low_count, pool->order,
> +			   (PAGE_SIZE << pool->order) * pool->low_count);
> +	}
> +	return 0;
> +}
> +
> +static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
> +{
> +	int i;
> +
> +	for (i = 0; i < num_orders; i++)
> +		if (pools[i])
> +			ion_page_pool_destroy(pools[i]);
> +}
> +
> +static int ion_system_heap_create_pools(struct ion_page_pool **pools)
> +{
> +	int i;
> +	gfp_t gfp_flags = low_order_gfp_flags;
> +
> +	for (i = 0; i < num_orders; i++) {
> +		struct ion_page_pool *pool;
> +
> +		if (orders[i] > 4)
> +			gfp_flags = high_order_gfp_flags;
> +
> +		pool = ion_page_pool_create(gfp_flags, orders[i]);
> +		if (!pool)
> +			goto err_create_pool;
> +		pools[i] = pool;
> +	}
>  	return 0;
> +err_create_pool:
> +	ion_system_heap_destroy_pools(pools);
> +	return -ENOMEM;
>  }
>
>  struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
>  {
>  	struct ion_system_heap *heap;
> -	int i;
>
>  	heap = kzalloc(sizeof(struct ion_system_heap) +
> -			sizeof(struct ion_page_pool *) * num_orders,
> +			sizeof(struct ion_page_pool *) * num_orders * 2,
>  			GFP_KERNEL);
>  	if (!heap)
>  		return ERR_PTR(-ENOMEM);
> @@ -287,24 +342,18 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
>  	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
>  	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
>
> -	for (i = 0; i < num_orders; i++) {
> -		struct ion_page_pool *pool;
> -		gfp_t gfp_flags = low_order_gfp_flags;
> +	if (ion_system_heap_create_pools(heap->uncached_pools))
> +		goto free_heap;
>
> -		if (orders[i] > 4)
> -			gfp_flags = high_order_gfp_flags;
> -		pool = ion_page_pool_create(gfp_flags, orders[i]);
> -		if (!pool)
> -			goto destroy_pools;
> -		heap->pools[i] = pool;
> -	}
> +	if (ion_system_heap_create_pools(heap->cached_pools))
> +		goto destroy_uncached_pools;
>
>  	heap->heap.debug_show = ion_system_heap_debug_show;
>  	return &heap->heap;
>
> -destroy_pools:
> -	while (i--)
> -		ion_page_pool_destroy(heap->pools[i]);
> +destroy_uncached_pools:
> +	ion_system_heap_destroy_pools(heap->uncached_pools);
> +free_heap:
>  	kfree(heap);
>  	return ERR_PTR(-ENOMEM);
>  }
> @@ -316,8 +365,10 @@ void ion_system_heap_destroy(struct ion_heap *heap)
>  							heap);
>  	int i;
>
> -	for (i = 0; i < num_orders; i++)
> -		ion_page_pool_destroy(sys_heap->pools[i]);
> +	for (i = 0; i < num_orders; i++) {
> +		ion_page_pool_destroy(sys_heap->uncached_pools[i]);
> +		ion_page_pool_destroy(sys_heap->cached_pools[i]);
> +	}
>  	kfree(sys_heap);
>  }
>
>

Thanks,
Laura
