Message-Id: <20230615034830.1361853-1-hezhongkun.hzk@bytedance.com>
Date:   Thu, 15 Jun 2023 11:48:30 +0800
From:   Zhongkun He <hezhongkun.hzk@...edance.com>
To:     minchan@...nel.org, senozhatsky@...omium.org, mhocko@...e.com
Cc:     linux-mm@...ck.org, linux-kernel@...r.kernel.org,
        Zhongkun He <hezhongkun.hzk@...edance.com>
Subject: [RFC PATCH 1/3] zram: charge the compressed RAM to the page's memcg

The compressed RAM is currently charged to the kernel rather than
to any memory cgroup, which does not fit our usage scenario. When
a task's memory is limited by a memory cgroup, its pages get
swapped out to the zram swap device under memory pressure, but the
compressed copies escape the cgroup's accounting, so the memory
limit effectively has no effect.

So it makes sense to charge the compressed RAM to the page's
memory cgroup.

Signed-off-by: Zhongkun He <hezhongkun.hzk@...edance.com>
---
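Note (not part of the patch): a minimal sketch of the save/restore pattern
that zram_write_page() follows below — look up the memcg the page is charged
to, make it the active memcg around the allocation path, and restore the old
one on every exit path. The helper alloc_on_behalf_of_page() is made up for
illustration; whether an allocation is actually charged depends on it being
accountable (e.g. __GFP_ACCOUNT or an accounted allocator) in the running
kernel.

#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/slab.h>

/*
 * Illustrative helper only: charge an allocation to the memcg that owns
 * @page instead of to the current task / kernel.
 */
static void *alloc_on_behalf_of_page(struct page *page, size_t size, gfp_t gfp)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *buf;

	memcg = page_memcg(page);		/* memcg the page is charged to */
	old_memcg = set_active_memcg(memcg);	/* redirect memcg accounting */

	/* accounted allocations here are charged to 'memcg', not the caller */
	buf = kmalloc(size, gfp | __GFP_ACCOUNT);

	set_active_memcg(old_memcg);		/* always restore before returning */
	return buf;
}
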
 drivers/block/zram/zram_drv.c | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index f6d90f1ba5cf..03b508447473 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -33,6 +33,7 @@
 #include <linux/debugfs.h>
 #include <linux/cpuhotplug.h>
 #include <linux/part_stat.h>
+#include <linux/memcontrol.h>
 
 #include "zram_drv.h"
 
@@ -1419,6 +1420,10 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 	struct zcomp_strm *zstrm;
 	unsigned long element = 0;
 	enum zram_pageflags flags = 0;
+	struct mem_cgroup *memcg, *old_memcg;
+
+	memcg = page_memcg(page);
+	old_memcg = set_active_memcg(memcg);
 
 	mem = kmap_atomic(page);
 	if (page_same_filled(mem, &element)) {
@@ -1426,7 +1431,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 		/* Free memory associated with this sector now. */
 		flags = ZRAM_SAME;
 		atomic64_inc(&zram->stats.same_pages);
-		goto out;
+		goto out_free;
 	}
 	kunmap_atomic(mem);
 
@@ -1440,7 +1445,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
 		pr_err("Compression failed! err=%d\n", ret);
 		zs_free(zram->mem_pool, handle);
-		return ret;
+		goto out;
 	}
 
 	if (comp_len >= huge_class_size)
@@ -1470,8 +1475,10 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 		handle = zs_malloc(zram->mem_pool, comp_len,
 				GFP_NOIO | __GFP_HIGHMEM |
 				__GFP_MOVABLE);
-		if (IS_ERR_VALUE(handle))
-			return PTR_ERR((void *)handle);
+		if (IS_ERR_VALUE(handle)) {
+			ret = PTR_ERR((void *)handle);
+			goto out;
+		}
 
 		if (comp_len != PAGE_SIZE)
 			goto compress_again;
@@ -1491,7 +1498,8 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
 		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
 		zs_free(zram->mem_pool, handle);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 
 	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
@@ -1506,7 +1514,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 	zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
 	zs_unmap_object(zram->mem_pool, handle);
 	atomic64_add(comp_len, &zram->stats.compr_data_size);
-out:
+out_free:
 	/*
 	 * Free memory associated with this sector
 	 * before overwriting unused sectors.
@@ -1531,6 +1539,8 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 
 	/* Update stats */
 	atomic64_inc(&zram->stats.pages_stored);
+out:
+	set_active_memcg(old_memcg);
 	return ret;
 }
 
-- 
2.25.1
