Message-Id: <20240829102039.3455842-1-hezhongkun.hzk@bytedance.com>
Date: Thu, 29 Aug 2024 18:20:38 +0800
From: Zhongkun He <hezhongkun.hzk@...edance.com>
To: akpm@...ux-foundation.org,
	hannes@...xchg.org,
	mhocko@...nel.org
Cc: roman.gushchin@...ux.dev,
	shakeel.butt@...ux.dev,
	muchun.song@...ux.dev,
	lizefan.x@...edance.com,
	linux-mm@...ck.org,
	linux-kernel@...r.kernel.org,
	cgroups@...r.kernel.org,
	Zhongkun He <hezhongkun.hzk@...edance.com>
Subject: [RFC PATCH 1/2] mm: vmscan: modify the semantics of scan_control.may_unmap to UNMAP_ANON and UNMAP_FILE

This is a preparation patch for adding a disable_unmap_file argument to
memory.reclaim.

So far scan_control.may_unmap has been a boolean: it only says whether
mapped folios may be unmapped at all in the reclaim path, and it cannot
distinguish file pages from anonymous pages, so user space cannot make a
more precise choice when proactively reclaiming memory. In practice,
mapped file pages are crucial to a program's operation, since they
usually contain executable code, important data and shared libraries,
so it is worth distinguishing the two cases.

Widen may_unmap to two bits and introduce UNMAP_ANON, UNMAP_FILE and
UNMAP_ANON_AND_FILE, so that callers can request unmapping of anonymous
folios, file folios, or both.
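
As an illustration only (not part of this patch), a proactive-reclaim
caller could translate a hypothetical disable_unmap_file option into the
new values roughly like this, assuming the UNMAP_* definitions
introduced below:

	/*
	 * Sketch only: map a hypothetical disable_unmap_file option to the
	 * new scan_control.may_unmap values.  Not part of this patch.
	 */
	static unsigned int reclaim_unmap_mode(bool disable_unmap_file)
	{
		if (!disable_unmap_file)
			/* Default: mapped anon and file folios may both be reclaimed. */
			return UNMAP_ANON_AND_FILE;

		/* Keep mapped file folios (code, shared libraries) resident. */
		return UNMAP_ANON;
	}

A scan_control for such a request would then be initialized with
.may_unmap = reclaim_unmap_mode(disable_unmap_file) instead of
.may_unmap = 1.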

Signed-off-by: Zhongkun He <hezhongkun.hzk@...edance.com>
---
 mm/vmscan.c | 61 +++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 47 insertions(+), 14 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 283e3f9d652b..50ac714cba2f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -107,8 +107,11 @@ struct scan_control {
 	/* Writepage batching in laptop mode; RECLAIM_WRITE */
 	unsigned int may_writepage:1;
 
+	#define UNMAP_ANON 1
+	#define UNMAP_FILE 2
+	#define UNMAP_ANON_AND_FILE (UNMAP_ANON | UNMAP_FILE)
 	/* Can mapped folios be reclaimed? */
-	unsigned int may_unmap:1;
+	unsigned int may_unmap:2;
 
 	/* Can folios be swapped as part of reclaim? */
 	unsigned int may_swap:1;
@@ -1083,8 +1086,23 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 		if (unlikely(!folio_evictable(folio)))
 			goto activate_locked;
 
-		if (!sc->may_unmap && folio_mapped(folio))
-			goto keep_locked;
+		if (folio_mapped(folio)) {
+			switch (sc->may_unmap) {
+			/* The most likely case. */
+			case UNMAP_ANON_AND_FILE:
+				break;
+			case UNMAP_ANON:
+				if (!folio_test_anon(folio))
+					goto keep_locked;
+				break;
+			case UNMAP_FILE:
+				if (folio_test_anon(folio))
+					goto keep_locked;
+				break;
+			default:
+				goto keep_locked;
+			}
+		}
 
 		/* folio_update_gen() tried to promote this page? */
 		if (lru_gen_enabled() && !ignore_references &&
@@ -1563,7 +1581,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 {
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
-		.may_unmap = 1,
+		.may_unmap = UNMAP_ANON_AND_FILE,
 	};
 	struct reclaim_stat stat;
 	unsigned int nr_reclaimed;
@@ -1688,8 +1706,23 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
 
 		if (!folio_test_lru(folio))
 			goto move;
-		if (!sc->may_unmap && folio_mapped(folio))
-			goto move;
+
+		if (folio_mapped(folio)) {
+			switch (sc->may_unmap) {
+			case UNMAP_ANON_AND_FILE:
+				break;
+			case UNMAP_ANON:
+				if (!folio_test_anon(folio))
+					goto move;
+				break;
+			case UNMAP_FILE:
+				if (folio_test_anon(folio))
+					goto move;
+				break;
+			default:
+				goto move;
+			}
+		}
 
 		/*
 		 * Be careful not to clear the lru flag until after we're
@@ -2135,7 +2168,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.may_writepage = 1,
-		.may_unmap = 1,
+		.may_unmap = UNMAP_ANON_AND_FILE,
 		.may_swap = 1,
 		.no_demotion = 1,
 	};
@@ -5467,7 +5500,7 @@ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
 	int err = -EINVAL;
 	struct scan_control sc = {
 		.may_writepage = true,
-		.may_unmap = true,
+		.may_unmap = UNMAP_ANON_AND_FILE,
 		.may_swap = true,
 		.reclaim_idx = MAX_NR_ZONES - 1,
 		.gfp_mask = GFP_KERNEL,
@@ -6482,7 +6515,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.nodemask = nodemask,
 		.priority = DEF_PRIORITY,
 		.may_writepage = !laptop_mode,
-		.may_unmap = 1,
+		.may_unmap = UNMAP_ANON_AND_FILE,
 		.may_swap = 1,
 	};
 
@@ -6526,7 +6559,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
 		.target_mem_cgroup = memcg,
 		.may_writepage = !laptop_mode,
-		.may_unmap = 1,
+		.may_unmap = UNMAP_ANON_AND_FILE,
 		.reclaim_idx = MAX_NR_ZONES - 1,
 		.may_swap = !noswap,
 	};
@@ -6572,7 +6605,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 		.target_mem_cgroup = memcg,
 		.priority = DEF_PRIORITY,
 		.may_writepage = !laptop_mode,
-		.may_unmap = 1,
+		.may_unmap = UNMAP_ANON_AND_FILE,
 		.may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
 		.proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
 	};
@@ -6837,7 +6870,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.order = order,
-		.may_unmap = 1,
+		.may_unmap = UNMAP_ANON_AND_FILE,
 	};
 
 	set_task_reclaim_state(current, &sc.reclaim_state);
@@ -7304,7 +7337,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 		.reclaim_idx = MAX_NR_ZONES - 1,
 		.priority = DEF_PRIORITY,
 		.may_writepage = 1,
-		.may_unmap = 1,
+		.may_unmap = UNMAP_ANON_AND_FILE,
 		.may_swap = 1,
 		.hibernation_mode = 1,
 	};
@@ -7462,7 +7495,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 		.order = order,
 		.priority = NODE_RECLAIM_PRIORITY,
 		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
-		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
+		.may_unmap = (node_reclaim_mode & RECLAIM_UNMAP) ? UNMAP_ANON_AND_FILE : 0,
 		.may_swap = 1,
 		.reclaim_idx = gfp_zone(gfp_mask),
 	};
-- 
2.20.1

