Message-ID: <20230618065816.1365301-1-yosryahmed@google.com>
Date:   Sun, 18 Jun 2023 06:58:16 +0000
From:   Yosry Ahmed <yosryahmed@...gle.com>
To:     Andrew Morton <akpm@...ux-foundation.org>
Cc:     Yu Zhao <yuzhao@...gle.com>,
        "Jan Alexander Steffens (heftig)" <heftig@...hlinux.org>,
        Steven Barrett <steven@...uorix.net>,
        Brian Geffon <bgeffon@...gle.com>,
        "T.J. Alumbaugh" <talumbau@...gle.com>,
        Gaosheng Cui <cuigaosheng1@...wei.com>,
        Suren Baghdasaryan <surenb@...gle.com>,
        "Matthew Wilcox (Oracle)" <willy@...radead.org>,
        "Liam R. Howlett" <Liam.Howlett@...cle.com>,
        David Hildenbrand <david@...hat.com>,
        Jason Gunthorpe <jgg@...pe.ca>,
        Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
        David Howells <dhowells@...hat.com>,
        Hugh Dickins <hughd@...gle.com>,
        Greg Thelen <gthelen@...gle.com>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, Yosry Ahmed <yosryahmed@...gle.com>
Subject: [RFC PATCH 4/5] mm/vmscan: revive the unevictable LRU

Now that mlock_count no longer overlays page->lru, revive the
unevictable LRU. There is no longer a need to special-case it when
adding a folio to or removing it from the LRUs. This also enables
future work that will use the LRUs to find all user folios charged to
a memcg; having the unevictable LRU ensures we do not miss a
significant chunk of those.

Signed-off-by: Yosry Ahmed <yosryahmed@...gle.com>
---
 include/linux/mm_inline.h | 11 +++--------
 mm/huge_memory.c          |  3 +--
 mm/mmzone.c               |  8 --------
 3 files changed, 4 insertions(+), 18 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 0e1d239a882c..203b8db6b4a2 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -319,8 +319,7 @@ void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
 
 	update_lru_size(lruvec, lru, folio_zonenum(folio),
 			folio_nr_pages(folio));
-	if (lru != LRU_UNEVICTABLE)
-		list_add(&folio->lru, &lruvec->lists[lru]);
+	list_add(&folio->lru, &lruvec->lists[lru]);
 }
 
 static __always_inline void add_page_to_lru_list(struct page *page,
@@ -339,21 +338,17 @@ void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
 
 	update_lru_size(lruvec, lru, folio_zonenum(folio),
 			folio_nr_pages(folio));
-	/* This is not expected to be used on LRU_UNEVICTABLE */
 	list_add_tail(&folio->lru, &lruvec->lists[lru]);
 }
 
 static __always_inline
 void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
 {
-	enum lru_list lru = folio_lru_list(folio);
-
 	if (lru_gen_del_folio(lruvec, folio, false))
 		return;
 
-	if (lru != LRU_UNEVICTABLE)
-		list_del(&folio->lru);
-	update_lru_size(lruvec, lru, folio_zonenum(folio),
+	list_del(&folio->lru);
+	update_lru_size(lruvec, folio_lru_list(folio), folio_zonenum(folio),
 			-folio_nr_pages(folio));
 }
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0e5b58ca603f..4aa2f4ad8da7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2407,8 +2407,7 @@ static void lru_add_page_tail(struct page *head, struct page *tail,
 	} else {
 		/* head is still on lru (and we have it frozen) */
 		VM_WARN_ON(!PageLRU(head));
-		if (!PageUnevictable(tail))
-			list_add_tail(&tail->lru, &head->lru);
+		list_add_tail(&tail->lru, &head->lru);
 		SetPageLRU(tail);
 	}
 }
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 68e1511be12d..7678177bd639 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -81,14 +81,6 @@ void lruvec_init(struct lruvec *lruvec)
 
 	for_each_lru(lru)
 		INIT_LIST_HEAD(&lruvec->lists[lru]);
-	/*
-	 * The "Unevictable LRU" is imaginary: though its size is maintained,
-	 * it is never scanned, and unevictable pages are not threaded on it
-	 * (so that their lru fields can be reused to hold mlock_count).
-	 * Poison its list head, so that any operations on it would crash.
-	 */
-	list_del(&lruvec->lists[LRU_UNEVICTABLE]);
-
 	lru_gen_init_lruvec(lruvec);
 }
 
-- 
2.41.0.162.gfafddb0af9-goog
