Message-Id: <1513938606-17735-1-git-send-email-gopi.st@samsung.com>
Date:   Fri, 22 Dec 2017 16:00:06 +0530
From:   Gopi Sai Teja <gopi.st@...sung.com>
To:     minchan@...nel.org, ngupta@...are.org,
        sergey.senozhatsky.work@...il.com
Cc:     linux-mm@...ck.org, linux-kernel@...r.kernel.org,
        v.narang@...sung.com, pankaj.m@...sung.com, a.sahrawat@...sung.com,
        prakash.a@...sung.com, himanshu.sh@...sung.com,
        lalit.mohan@...sung.com, Gopi Sai Teja <gopi.st@...sung.com>
Subject: [PATCH v2] zram: better utilization of zram swap space

75% of PAGE_SIZE is not a correct threshold for storing uncompressed
pages in a zspage, because it must be changed whenever the maximum
number of pages stored in a zspage changes. By deriving the threshold
from the zsmalloc size classes instead, we can set the correct
threshold irrespective of the maximum number of pages stored in a
zspage.
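
For illustration only (not part of the change itself), here is a
minimal user-space model of the selection rule. The constants mirror
common zsmalloc defaults for 4 KiB pages (16-byte class step, at most
4 pages per zspage, 8-byte handle) but are assumptions of this sketch,
not values taken from the kernel sources:

#include <stdio.h>

#define PAGE_SIZE               4096    /* assumed 4 KiB pages */
#define MAX_PAGES_PER_ZSPAGE    4       /* assumed zspage limit */
#define ZS_HANDLE_SIZE          8       /* assumed handle overhead */
#define CLASS_DELTA             16      /* assumed class size step */

/*
 * Pick the zspage size (in pages) that leaves the least wasted
 * space for objects of the given size, roughly mirroring what
 * zsmalloc does when it sets up a size class.
 */
static int pages_per_zspage(int size)
{
        int best_pages = 1, best_usedpc = 0;

        for (int pages = 1; pages <= MAX_PAGES_PER_ZSPAGE; pages++) {
                int zspage = pages * PAGE_SIZE;
                int usedpc = (zspage - zspage % size) * 100 / zspage;

                if (usedpc > best_usedpc) {
                        best_usedpc = usedpc;
                        best_pages = pages;
                }
        }
        return best_pages;
}

int main(void)
{
        /*
         * Walk the classes from the largest down, as the patch does,
         * and stop at the first class that packs more objects than
         * pages: only such classes save space over storing the page
         * uncompressed.
         */
        for (int size = PAGE_SIZE; size >= CLASS_DELTA; size -= CLASS_DELTA) {
                int pages = pages_per_zspage(size);
                int objs = pages * PAGE_SIZE / size;

                if (pages < objs) {
                        printf("max_zpage_size = %d (class size %d)\n",
                               size - ZS_HANDLE_SIZE, size);
                        return 0;
                }
        }
        return 0;
}

With the assumed constants this prints a cut-off of 3256 bytes
(class size 3264), which matches class 202 in the results below.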

Tested on ARM:

Before Patch:
class  size  obj_allocated   obj_used pages_used
...
  190  3072           6744       6724       5058
  202  3264             90         87         72
  254  4096          11886      11886      11886

Total               123251     120511      55076

After Patch:
class  size  obj_allocated   obj_used pages_used
...
  190  3072           6368       6326       4776
  202  3264           2205       2197       1764
  254  4096          12624      12624      12624

Total               125655     122045      56541
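
For reference, pages_used follows from the class packing: class 190
packs 4 objects of 3072 bytes into 3 pages, so 6744 objects occupy
6744 / 4 * 3 = 5058 pages, and class 202 packs 5 objects of 3264 bytes
into 4 pages, so 2205 objects occupy 2205 / 5 * 4 = 1764 pages. The
jump in class 202 usage after the patch is the intended effect: pages
that compress to between the old 75%-of-PAGE_SIZE cut-off and the new
class-derived one are now stored compressed instead of falling through
to the uncompressed 4096-byte class.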

Signed-off-by: Gopi Sai Teja <gopi.st@...sung.com>
---
v1 -> v2: Earlier, the threshold to store an uncompressed page was
set to 80% of PAGE_SIZE; now the zsmalloc size classes are used to
set the threshold.

 drivers/block/zram/zram_drv.c |  2 +-
 include/linux/zsmalloc.h      |  1 +
 mm/zsmalloc.c                 | 13 +++++++++++++
 3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d70eba3..dda0ef8 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -965,7 +965,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 		return ret;
 	}
 
-	if (unlikely(comp_len > max_zpage_size)) {
+	if (unlikely(comp_len > zs_max_zpage_size(zram->mem_pool))) {
 		if (zram_wb_enabled(zram) && allow_wb) {
 			zcomp_stream_put(zram->comp);
 			ret = write_to_bdev(zram, bvec, index, bio, &element);
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 57a8e98..0b09aa5 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -54,5 +54,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 unsigned long zs_get_total_pages(struct zs_pool *pool);
 unsigned long zs_compact(struct zs_pool *pool);
 
+unsigned int zs_max_zpage_size(struct zs_pool *pool);
 void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
 #endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 685049a..5b434ab 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -261,6 +261,7 @@ struct zs_pool {
 	 * and unregister_shrinker() will not Oops.
 	 */
 	bool shrinker_enabled;
+	unsigned short max_zpage_size;
 #ifdef CONFIG_ZSMALLOC_STAT
 	struct dentry *stat_dentry;
 #endif
@@ -318,6 +319,11 @@ static void init_deferred_free(struct zs_pool *pool) {}
 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
 #endif
 
+unsigned int zs_max_zpage_size(struct zs_pool *pool)
+{
+	return pool->max_zpage_size;
+}
+
 static int create_cache(struct zs_pool *pool)
 {
 	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
@@ -2368,6 +2374,8 @@ struct zs_pool *zs_create_pool(const char *name)
 	if (create_cache(pool))
 		goto err;
 
+	pool->max_zpage_size = 0;
+
 	/*
 	 * Iterate reversely, because, size of size_class that we want to use
 	 * for merging should be larger or equal to current size.
@@ -2411,6 +2419,11 @@ struct zs_pool *zs_create_pool(const char *name)
 		class->objs_per_zspage = objs_per_zspage;
 		spin_lock_init(&class->lock);
 		pool->size_class[i] = class;
+
+		if (!pool->max_zpage_size &&
+				pages_per_zspage < objs_per_zspage)
+			pool->max_zpage_size = class->size - ZS_HANDLE_SIZE;
+
 		for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
 							fullness++)
 			INIT_LIST_HEAD(&class->fullness_list[fullness]);
-- 
1.9.1
