lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Tue, 14 Mar 2017 10:10:01 -0700
From:   Laura Abbott <labbott@...hat.com>
To:     Junil Lee <junil0814.lee@....com>, sumit.semwal@...aro.org,
        gregkh@...uxfoundation.org, arve@...roid.com, riandrews@...roid.com
Cc:     devel@...verdev.osuosl.org, linux-kernel@...r.kernel.org,
        Bongkyu Kim <bongkyu.kim@....com>
Subject: Re: [PATCH] staging: android: ion: reduce lock contention latency

On 03/14/2017 12:51 AM, Junil Lee wrote:
> Replace the plain list with a lock-less list in the ion page pool.
> 
> Measure the mutex lock contention latency on Android.
> 
> 1. the test is done under android 7.0
> 2. start up many applications cyclically
> 3. find sample in trace log as below
> 
>     cameraserver-625   [004] ...1  1891.952958: mutex_lock_enter: id=0
>     Binder:384_2-417   [005] ...1  1891.952958: mutex_lock_enter: id=0
>     Binder:384_2-417   [005] ...1  1891.952966: mutex_lock_enter: id=1
>     Binder:384_2-417   [005] ...1  1891.952970: mutex_lock_enter: id=0
>     Binder:384_2-417   [005] ...1  1891.952971: mutex_lock_enter: id=1
>     Binder:384_2-417   [005] ...1  1891.952982: mutex_lock_enter: id=0
>     Binder:384_2-417   [005] ...1  1891.952983: mutex_lock_enter: id=1
>     Binder:384_2-417   [005] ...1  1891.952989: mutex_lock_enter: id=0
>     Binder:384_2-417   [005] ...1  1891.952989: mutex_lock_enter: id=1
>     Binder:384_2-417   [005] ...1  1891.952995: mutex_lock_enter: id=0
>     cameraserver-625   [004] ...1  1891.952995: mutex_lock_enter: id=1
> 
>  - id 0 is try to lock, id 1 is locked
> 
> Measure how much the latency is reduced by this patch, as shown below.
> 
> The test starts up 60 applications cyclically (repeated for 10 cycles)
>  - lock contention count : 3717 -> 93
> 

We really need to finish up the work to move Ion out of staging before
looking at performance improvements so please help with that discussion.
Once that is finished up, we can look at performance improvements again.

That said, this is removing the lock from the free path. Is the
contention happening on the free path? Is the system heap deferring
frees? Does this have a notable effect on anything besides the count
of contention? None of this is necessarily a deal breaker but I'd
like a few more details.

Thanks,
Laura

> Signed-off-by: Bongkyu Kim <bongkyu.kim@....com>
> Signed-off-by: Junil Lee <junil0814.lee@....com>
> ---
>  drivers/staging/android/ion/ion_page_pool.c   | 52 ++++++++++++++-------------
>  drivers/staging/android/ion/ion_priv.h        |  8 ++---
>  drivers/staging/android/ion/ion_system_heap.c | 16 ++++-----
>  3 files changed, 40 insertions(+), 36 deletions(-)
> 
> diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
> index aea89c1..1beb2c8 100644
> --- a/drivers/staging/android/ion/ion_page_pool.c
> +++ b/drivers/staging/android/ion/ion_page_pool.c
> @@ -22,6 +22,7 @@
>  #include <linux/init.h>
>  #include <linux/slab.h>
>  #include <linux/swap.h>
> +#include <linux/llist.h>
>  #include "ion_priv.h"
>  
>  static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
> @@ -44,33 +45,36 @@ static void ion_page_pool_free_pages(struct ion_page_pool *pool,
>  
>  static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
>  {
> -	mutex_lock(&pool->mutex);
>  	if (PageHighMem(page)) {
> -		list_add_tail(&page->lru, &pool->high_items);
> -		pool->high_count++;
> +		llist_add((struct llist_node *)&page->lru, &pool->high_items);
> +		atomic_inc(&pool->high_count);
>  	} else {
> -		list_add_tail(&page->lru, &pool->low_items);
> -		pool->low_count++;
> +		llist_add((struct llist_node *)&page->lru, &pool->low_items);
> +		atomic_inc(&pool->low_count);
>  	}
> -	mutex_unlock(&pool->mutex);
> +
>  	return 0;
>  }
>  
>  static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
>  {
> -	struct page *page;
> +	struct page *page = NULL;
> +	struct llist_node *node;
>  
>  	if (high) {
> -		BUG_ON(!pool->high_count);
> -		page = list_first_entry(&pool->high_items, struct page, lru);
> -		pool->high_count--;
> +		BUG_ON(!atomic_read(&pool->high_count));
> +		node = llist_del_first(&pool->high_items);
> +		if (node)
> +			node = llist_entry((struct list_head *)node, struct page, lru);
> +		atomic_dec(&pool->high_count);
>  	} else {
> -		BUG_ON(!pool->low_count);
> -		page = list_first_entry(&pool->low_items, struct page, lru);
> -		pool->low_count--;
> +		BUG_ON(!atomic_read(&pool->low_count));
> +		node = llist_del_first(&pool->low_items);
> +		if (node)
> +			node = llist_entry((struct list_head *)node, struct page, lru);
> +		atomic_dec(&pool->low_count);
>  	}
>  
> -	list_del(&page->lru);
>  	return page;
>  }
>  
> @@ -81,9 +85,9 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
>  	BUG_ON(!pool);
>  
>  	mutex_lock(&pool->mutex);
> -	if (pool->high_count)
> +	if (atomic_read(&pool->high_count))
>  		page = ion_page_pool_remove(pool, true);
> -	else if (pool->low_count)
> +	else if (atomic_read(&pool->low_count))
>  		page = ion_page_pool_remove(pool, false);
>  	mutex_unlock(&pool->mutex);
>  
> @@ -106,10 +110,10 @@ void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
>  
>  static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
>  {
> -	int count = pool->low_count;
> +	int count = atomic_read(&pool->low_count);
>  
>  	if (high)
> -		count += pool->high_count;
> +		count += atomic_read(&pool->high_count);
>  
>  	return count << pool->order;
>  }
> @@ -132,9 +136,9 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
>  		struct page *page;
>  
>  		mutex_lock(&pool->mutex);
> -		if (pool->low_count) {
> +		if (atomic_read(&pool->low_count)) {
>  			page = ion_page_pool_remove(pool, false);
> -		} else if (high && pool->high_count) {
> +		} else if (high && atomic_read(&pool->high_count)) {
>  			page = ion_page_pool_remove(pool, true);
>  		} else {
>  			mutex_unlock(&pool->mutex);
> @@ -155,10 +159,10 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
>  
>  	if (!pool)
>  		return NULL;
> -	pool->high_count = 0;
> -	pool->low_count = 0;
> -	INIT_LIST_HEAD(&pool->low_items);
> -	INIT_LIST_HEAD(&pool->high_items);
> +	atomic_set(&pool->high_count, 0);
> +	atomic_set(&pool->low_count, 0);
> +	init_llist_head(&pool->low_items);
> +	init_llist_head(&pool->high_items);
>  	pool->gfp_mask = gfp_mask | __GFP_COMP;
>  	pool->order = order;
>  	mutex_init(&pool->mutex);
> diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
> index 5b3059c..d4d5704 100644
> --- a/drivers/staging/android/ion/ion_priv.h
> +++ b/drivers/staging/android/ion/ion_priv.h
> @@ -414,11 +414,11 @@ void ion_cma_heap_destroy(struct ion_heap *heap);
>   * on many systems
>   */
>  struct ion_page_pool {
> -	int high_count;
> -	int low_count;
> +	atomic_t high_count;
> +	atomic_t low_count;
>  	bool cached;
> -	struct list_head high_items;
> -	struct list_head low_items;
> +	struct llist_head high_items;
> +	struct llist_head low_items;
>  	struct mutex mutex;
>  	gfp_t gfp_mask;
>  	unsigned int order;
> diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
> index 3ebbb75..8ee8d98 100644
> --- a/drivers/staging/android/ion/ion_system_heap.c
> +++ b/drivers/staging/android/ion/ion_system_heap.c
> @@ -274,22 +274,22 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
>  		pool = sys_heap->uncached_pools[i];
>  
>  		seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
> -			   pool->high_count, pool->order,
> -			   (PAGE_SIZE << pool->order) * pool->high_count);
> +			   atomic_read(&pool->high_count), pool->order,
> +			   (PAGE_SIZE << pool->order) * atomic_read(&pool->high_count));
>  		seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
> -			   pool->low_count, pool->order,
> -			   (PAGE_SIZE << pool->order) * pool->low_count);
> +			   atomic_read(&pool->low_count), pool->order,
> +			   (PAGE_SIZE << pool->order) * atomic_read(&pool->low_count));
>  	}
>  
>  	for (i = 0; i < NUM_ORDERS; i++) {
>  		pool = sys_heap->cached_pools[i];
>  
>  		seq_printf(s, "%d order %u highmem pages cached %lu total\n",
> -			   pool->high_count, pool->order,
> -			   (PAGE_SIZE << pool->order) * pool->high_count);
> +			   atomic_read(&pool->high_count), pool->order,
> +			   (PAGE_SIZE << pool->order) * atomic_read(&pool->high_count));
>  		seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
> -			   pool->low_count, pool->order,
> -			   (PAGE_SIZE << pool->order) * pool->low_count);
> +			   atomic_read(&pool->low_count), pool->order,
> +			   (PAGE_SIZE << pool->order) * atomic_read(&pool->low_count));
>  	}
>  	return 0;
>  }
> 

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ