lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Tue, 7 Jul 2015 22:36:38 +0900
From:	Minchan Kim <minchan@...nel.org>
To:	Sergey Senozhatsky <sergey.senozhatsky@...il.com>
Cc:	Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
	linux-kernel@...r.kernel.org,
	Sergey Senozhatsky <sergey.senozhatsky.work@...il.com>
Subject: Re: [PATCH v6 5/7] zsmalloc/zram: introduce zs_pool_stats api

On Tue, Jul 07, 2015 at 08:56:59PM +0900, Sergey Senozhatsky wrote:
> `zs_compact_control' accounts the number of migrated objects but
> it has a limited lifespan -- we lose it as soon as zs_compaction()
> returns back to zram. It worked fine, because (a) zram had its own
> counter of migrated objects and (b) only zram could trigger
> compaction. However, this does not work for automatic pool
> compaction (not issued by zram). To account objects migrated
> during auto-compaction (issued by the shrinker) we need to store
> this number in zs_pool.
> 
> Define a new `struct zs_pool_stats' structure to keep zs_pool's
> stats there. It provides only `num_migrated', as of this writing,
> but it surely can be extended.
> 
> A new zsmalloc zs_pool_stats() symbol exports zs_pool's stats
> back to caller.
> 
> Use zs_pool_stats() in zram and remove `num_migrated' from
> zram_stats.
> 
> Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
> Suggested-by: Minchan Kim <minchan@...nel.org>
> ---
>  drivers/block/zram/zram_drv.c | 11 ++++++-----
>  drivers/block/zram/zram_drv.h |  1 -
>  include/linux/zsmalloc.h      |  6 ++++++
>  mm/zsmalloc.c                 | 42 ++++++++++++++++++++++--------------------
>  4 files changed, 34 insertions(+), 26 deletions(-)
> 
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index fb655e8..aa22fe07 100644
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -388,7 +388,6 @@ static ssize_t comp_algorithm_store(struct device *dev,
>  static ssize_t compact_store(struct device *dev,
>  		struct device_attribute *attr, const char *buf, size_t len)
>  {
> -	unsigned long nr_migrated;
>  	struct zram *zram = dev_to_zram(dev);
>  	struct zram_meta *meta;
>  
> @@ -399,8 +398,7 @@ static ssize_t compact_store(struct device *dev,
>  	}
>  
>  	meta = zram->meta;
> -	nr_migrated = zs_compact(meta->mem_pool);
> -	atomic64_add(nr_migrated, &zram->stats.num_migrated);
> +	zs_compact(meta->mem_pool);
>  	up_read(&zram->init_lock);
>  
>  	return len;
> @@ -428,13 +426,16 @@ static ssize_t mm_stat_show(struct device *dev,
>  		struct device_attribute *attr, char *buf)
>  {
>  	struct zram *zram = dev_to_zram(dev);
> +	struct zs_pool_stats pool_stats = {0};

Does it work even if the first member of the structure is non-scalar?
Personally I prefer memset for initialization.
I believe modern compilers would optimize that quite well.


>  	u64 orig_size, mem_used = 0;
>  	long max_used;
>  	ssize_t ret;
>  
>  	down_read(&zram->init_lock);
> -	if (init_done(zram))
> +	if (init_done(zram)) {
>  		mem_used = zs_get_total_pages(zram->meta->mem_pool);
> +		zs_pool_stats(zram->meta->mem_pool, &pool_stats);
> +	}
>  
>  	orig_size = atomic64_read(&zram->stats.pages_stored);
>  	max_used = atomic_long_read(&zram->stats.max_used_pages);
> @@ -447,7 +448,7 @@ static ssize_t mm_stat_show(struct device *dev,
>  			zram->limit_pages << PAGE_SHIFT,
>  			max_used << PAGE_SHIFT,
>  			(u64)atomic64_read(&zram->stats.zero_pages),
> -			(u64)atomic64_read(&zram->stats.num_migrated));
> +			pool_stats.num_migrated);
>  	up_read(&zram->init_lock);
>  
>  	return ret;
> diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
> index 6dbe2df..8e92339 100644
> --- a/drivers/block/zram/zram_drv.h
> +++ b/drivers/block/zram/zram_drv.h
> @@ -78,7 +78,6 @@ struct zram_stats {
>  	atomic64_t compr_data_size;	/* compressed size of pages stored */
>  	atomic64_t num_reads;	/* failed + successful */
>  	atomic64_t num_writes;	/* --do-- */
> -	atomic64_t num_migrated;	/* no. of migrated object */
>  	atomic64_t failed_reads;	/* can happen when memory is too low */
>  	atomic64_t failed_writes;	/* can happen when memory is too low */
>  	atomic64_t invalid_io;	/* non-page-aligned I/O requests */
> diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
> index 1338190..9340fce 100644
> --- a/include/linux/zsmalloc.h
> +++ b/include/linux/zsmalloc.h
> @@ -34,6 +34,11 @@ enum zs_mapmode {
>  	 */
>  };
>  
> +struct zs_pool_stats {
> +	/* How many objects were migrated */
> +	u64		num_migrated;
> +};
> +
>  struct zs_pool;
>  
>  struct zs_pool *zs_create_pool(char *name, gfp_t flags);
> @@ -49,4 +54,5 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
>  unsigned long zs_get_total_pages(struct zs_pool *pool);
>  unsigned long zs_compact(struct zs_pool *pool);
>  
> +void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
>  #endif
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index ce1484e..db3cb2d 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -237,16 +237,18 @@ struct link_free {
>  };
>  
>  struct zs_pool {
> -	char *name;
> +	char			*name;

huge tab?

>  
> -	struct size_class **size_class;
> -	struct kmem_cache *handle_cachep;
> +	struct size_class	**size_class;
> +	struct kmem_cache	*handle_cachep;

tab?
tab?

>  
> -	gfp_t flags;	/* allocation flags used when growing pool */
> -	atomic_long_t pages_allocated;

Why change the comment's position?

> +	/* Allocation flags used when growing pool */
> +	gfp_t			flags;
> +	atomic_long_t		pages_allocated;
>  

Why blank line?

> +	struct zs_pool_stats	stats;
>  #ifdef CONFIG_ZSMALLOC_STAT
> -	struct dentry *stat_dentry;
> +	struct dentry		*stat_dentry;

Tab.

>  #endif
>  };
>  
> @@ -1587,7 +1589,7 @@ struct zs_compact_control {
>  	 /* Starting object index within @s_page which used for live object
>  	  * in the subpage. */
>  	int index;
> -	/* how many of objects are migrated */
> +	/* How many of objects were migrated */
>  	int nr_migrated;
>  };
>  
> @@ -1599,7 +1601,6 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
>  	struct page *s_page = cc->s_page;
>  	struct page *d_page = cc->d_page;
>  	unsigned long index = cc->index;
> -	int nr_migrated = 0;
>  	int ret = 0;
>  
>  	while (1) {
> @@ -1626,13 +1627,12 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
>  		record_obj(handle, free_obj);
>  		unpin_tag(handle);
>  		obj_free(pool, class, used_obj);
> -		nr_migrated++;
> +		cc->nr_migrated++;
>  	}
>  
>  	/* Remember last position in this iteration */
>  	cc->s_page = s_page;
>  	cc->index = index;
> -	cc->nr_migrated = nr_migrated;
>  
>  	return ret;
>  }
> @@ -1707,14 +1707,13 @@ static unsigned long zs_can_compact(struct size_class *class)
>  	return obj_wasted * get_pages_per_zspage(class->size);
>  }
>  
> -static unsigned long __zs_compact(struct zs_pool *pool,
> -				struct size_class *class)
> +static void __zs_compact(struct zs_pool *pool, struct size_class *class)
>  {
>  	struct zs_compact_control cc;
>  	struct page *src_page;
>  	struct page *dst_page = NULL;
> -	unsigned long nr_total_migrated = 0;
>  
> +	cc.nr_migrated = 0;
>  	spin_lock(&class->lock);
>  	while ((src_page = isolate_source_page(class))) {
>  
> @@ -1736,7 +1735,6 @@ static unsigned long __zs_compact(struct zs_pool *pool,
>  				break;
>  
>  			putback_zspage(pool, class, dst_page);
> -			nr_total_migrated += cc.nr_migrated;
>  		}
>  
>  		/* Stop if we couldn't find slot */
> @@ -1746,7 +1744,6 @@ static unsigned long __zs_compact(struct zs_pool *pool,
>  		putback_zspage(pool, class, dst_page);
>  		putback_zspage(pool, class, src_page);
>  		spin_unlock(&class->lock);
> -		nr_total_migrated += cc.nr_migrated;
>  		cond_resched();
>  		spin_lock(&class->lock);
>  	}
> @@ -1754,15 +1751,14 @@ static unsigned long __zs_compact(struct zs_pool *pool,
>  	if (src_page)
>  		putback_zspage(pool, class, src_page);
>  
> -	spin_unlock(&class->lock);
> +	pool->stats.num_migrated += cc.nr_migrated;
>  
> -	return nr_total_migrated;
> +	spin_unlock(&class->lock);
>  }
>  
>  unsigned long zs_compact(struct zs_pool *pool)
>  {
>  	int i;
> -	unsigned long nr_migrated = 0;
>  	struct size_class *class;
>  
>  	for (i = zs_size_classes - 1; i >= 0; i--) {
> @@ -1771,13 +1767,19 @@ unsigned long zs_compact(struct zs_pool *pool)
>  			continue;
>  		if (class->index != i)
>  			continue;
> -		nr_migrated += __zs_compact(pool, class);
> +		__zs_compact(pool, class);
>  	}
>  
> -	return nr_migrated;
> +	return pool->stats.num_migrated;
>  }
>  EXPORT_SYMBOL_GPL(zs_compact);
>  
> +void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
> +{
> +	memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
> +}
> +EXPORT_SYMBOL_GPL(zs_pool_stats);
> +
>  /**
>   * zs_create_pool - Creates an allocation pool to work from.
>   * @flags: allocation flags used to allocate pool metadata
> -- 
> 2.4.5
> 

-- 
Kind regards,
Minchan Kim
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ