Message-Id: <20231020084358.463846-3-link@vivo.com>
Date:   Fri, 20 Oct 2023 16:43:51 +0800
From:   Huan Yang <link@...o.com>
To:     Yu Zhao <yuzhao@...gle.com>, Steven Rostedt <rostedt@...dmis.org>,
        Masami Hiramatsu <mhiramat@...nel.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Huan Yang <link@...o.com>,
        Suren Baghdasaryan <surenb@...gle.com>,
        Vlastimil Babka <vbabka@...e.cz>, linux-kernel@...r.kernel.org,
        linux-trace-kernel@...r.kernel.org, linux-mm@...ck.org
Cc:     opensource.kernel@...o.com
Subject: [PATCH 2/2] mm: multi-gen LRU: move promoted folio out of lock

The nr_promote trace field shows that many folios scanned by
shrink_folio_list() have already been promoted before the shrink
checks run.

I tested with the commands below, grepping for nr_reclaimed=0:
```
trace-cmd record -e vmscan:mm_vmscan_lru_shrink_inactive\
         stressapptest -M 8096 -s 120 -m 1 -W
trace-cmd report | grep "nr_reclaimed=0"
```
Many entries like the following appear:
```
<...>-9042  [001]    43.290759: mm_vmscan_lru_shrink_inactive:
nid=0 nr_scanned=64 nr_reclaimed=0 nr_dirty=0 nr_writeback=0
nr_congested=0 nr_immediate=0 nr_activate_anon=0 nr_activate_file=1
nr_ref_keep=0 nr_unmap_fail=0 nr_promote=63 flags=RECLAIM_WB_FILE|RECLAIM_WB_ASYNC
```

Many of the scanned folios have already been promoted, so it is better
to perform the promoted check before trying to lock the folio.
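
For reference, the reordered fast path in shrink_folio_list() becomes
roughly the following (condensed from the hunk below):
```
	folio = lru_to_folio(folio_list);
	list_del(&folio->lru);

	/* Skip folios that folio_update_gen() already promoted, before
	 * paying for folio_trylock() and the later reference checks.
	 */
	if (lru_gen_enabled() && !ignore_references &&
	    folio_mapped(folio) && folio_test_referenced(folio))
		goto keep;

	if (!folio_trylock(folio))
		goto keep;
```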

Now that the promoted check happens before the folio is touched, there
is no need to trace it anymore; remove the nr_promote trace.

Signed-off-by: Huan Yang <link@...o.com>
---
 include/linux/vmstat.h        |  1 -
 include/trace/events/vmscan.h |  6 ++----
 mm/vmscan.c                   | 12 +++++-------
 3 files changed, 7 insertions(+), 12 deletions(-)

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index a3710ce08434..fed855bae6d8 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -32,7 +32,6 @@ struct reclaim_stat {
 	unsigned nr_ref_keep;
 	unsigned nr_unmap_fail;
 	unsigned nr_lazyfree_fail;
-	unsigned nr_promote;
 };
 
 enum writeback_stat_item {
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index ffcf288879e0..41964d6e8dd1 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -366,7 +366,6 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
 		__field(unsigned int, nr_activate1)
 		__field(unsigned long, nr_ref_keep)
 		__field(unsigned long, nr_unmap_fail)
-		__field(unsigned long, nr_promote)
 		__field(int, priority)
 		__field(int, reclaim_flags)
 	),
@@ -383,20 +382,19 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
 		__entry->nr_activate1 = stat->nr_activate[1];
 		__entry->nr_ref_keep = stat->nr_ref_keep;
 		__entry->nr_unmap_fail = stat->nr_unmap_fail;
-		__entry->nr_promote = stat->nr_promote;
 		__entry->priority = priority;
 		__entry->reclaim_flags = trace_reclaim_flags(file);
 	),
 
 	TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld nr_dirty=%ld nr_writeback=%ld nr_congested=%ld nr_immediate=%ld nr_activate_anon=%d"
-	" nr_activate_file=%d nr_ref_keep=%ld nr_unmap_fail=%ld nr_promote=%ld priority=%d flags=%s",
+	" nr_activate_file=%d nr_ref_keep=%ld nr_unmap_fail=%ld priority=%d flags=%s",
 		__entry->nid,
 		__entry->nr_scanned, __entry->nr_reclaimed,
 		__entry->nr_dirty, __entry->nr_writeback,
 		__entry->nr_congested, __entry->nr_immediate,
 		__entry->nr_activate0, __entry->nr_activate1,
 		__entry->nr_ref_keep, __entry->nr_unmap_fail,
-		__entry->nr_promote, __entry->priority,
+		__entry->priority,
 		show_reclaim_flags(__entry->reclaim_flags))
 );
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fb5df298c955..98a7b0f738bd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1045,6 +1045,11 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 		folio = lru_to_folio(folio_list);
 		list_del(&folio->lru);
 
+		/* folio_update_gen() tried to promote this page? */
+		if (lru_gen_enabled() && !ignore_references &&
+		    folio_mapped(folio) && folio_test_referenced(folio))
+			goto keep;
+
 		if (!folio_trylock(folio))
 			goto keep;
 
@@ -1061,13 +1066,6 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 		if (!sc->may_unmap && folio_mapped(folio))
 			goto keep_locked;
 
-		/* folio_update_gen() tried to promote this page? */
-		if (lru_gen_enabled() && !ignore_references &&
-		    folio_mapped(folio) && folio_test_referenced(folio)) {
-			stat->nr_promote += nr_pages;
-			goto keep_locked;
-		}
-
 		/*
 		 * The number of dirty pages determines if a node is marked
 		 * reclaim_congested. kswapd will stall and start writing
-- 
2.34.1
