Date:   Mon, 17 Apr 2017 17:06:20 -0700 (PDT)
From:   David Rientjes <rientjes@...gle.com>
To:     Andrew Morton <akpm@...ux-foundation.org>
cc:     Johannes Weiner <hannes@...xchg.org>,
        Mel Gorman <mgorman@...hsingularity.net>,
        linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: [patch] mm, vmscan: avoid thrashing anon lru when free + file is
 low

The purpose of the code that commit 623762517e23 ("revert 'mm: vmscan: do
not swap anon pages just because free+file is low'") reintroduces is to
prefer swapping anonymous memory rather than thrashing the file lru.

If all anonymous memory is unevictable, however, this insistence on
SCAN_ANON ends up thrashing that lru instead.

Check that enough evictable anon memory is actually on this lruvec before
insisting on SCAN_ANON.  SWAP_CLUSTER_MAX is used as the threshold to
determine whether scanning only anon is beneficial.

Otherwise, fall back to balanced reclaim so the file lru doesn't remain
untouched.
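
In outline, the new decision logic is the following (a simplified sketch of
the hunk below, not the verbatim kernel code; the setup of pgdatfree,
pgdatfile, and total_high_wmark is elided):

	anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, sc->reclaim_idx) +
	       lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx);
	if (anon >= SWAP_CLUSTER_MAX) {
		/* enough evictable anon: keep the free + file heuristic */
		if (pgdatfile + pgdatfree <= total_high_wmark) {
			scan_balance = SCAN_ANON;
			goto out;
		}
	}
	/* too little evictable anon: fall through to balanced reclaim */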

Signed-off-by: David Rientjes <rientjes@...gle.com>
---
 mm/vmscan.c | 41 +++++++++++++++++++++++------------------
 1 file changed, 23 insertions(+), 18 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2186,26 +2186,31 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 	 * anon pages.  Try to detect this based on file LRU size.
 	 */
 	if (global_reclaim(sc)) {
-		unsigned long pgdatfile;
-		unsigned long pgdatfree;
-		int z;
-		unsigned long total_high_wmark = 0;
-
-		pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
-		pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
-			   node_page_state(pgdat, NR_INACTIVE_FILE);
-
-		for (z = 0; z < MAX_NR_ZONES; z++) {
-			struct zone *zone = &pgdat->node_zones[z];
-			if (!managed_zone(zone))
-				continue;
+		anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, sc->reclaim_idx) +
+		       lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx);
+		if (likely(anon >= SWAP_CLUSTER_MAX)) {
+			unsigned long total_high_wmark = 0;
+			unsigned long pgdatfile;
+			unsigned long pgdatfree;
+			int z;
+
+			pgdatfree = sum_zone_node_page_state(pgdat->node_id,
+							     NR_FREE_PAGES);
+			pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
+				    node_page_state(pgdat, NR_INACTIVE_FILE);
+
+			for (z = 0; z < MAX_NR_ZONES; z++) {
+				struct zone *zone = &pgdat->node_zones[z];
+				if (!managed_zone(zone))
+					continue;
 
-			total_high_wmark += high_wmark_pages(zone);
-		}
+				total_high_wmark += high_wmark_pages(zone);
+			}
 
-		if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) {
-			scan_balance = SCAN_ANON;
-			goto out;
+			if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) {
+				scan_balance = SCAN_ANON;
+				goto out;
+			}
 		}
 	}
 
