Message-ID: <6e067746-9d18-4d04-a60a-536d5fee6b87@lucifer.local>
Date: Tue, 1 Jul 2025 12:30:17 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: David Hildenbrand <david@...hat.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-doc@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
virtualization@...ts.linux.dev, linux-fsdevel@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>,
Jonathan Corbet <corbet@....net>,
Madhavan Srinivasan <maddy@...ux.ibm.com>,
Michael Ellerman <mpe@...erman.id.au>,
Nicholas Piggin <npiggin@...il.com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Jerrin Shaji George <jerrin.shaji-george@...adcom.com>,
Arnd Bergmann <arnd@...db.de>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
Eugenio Pérez <eperezma@...hat.com>,
Alexander Viro <viro@...iv.linux.org.uk>,
Christian Brauner <brauner@...nel.org>, Jan Kara <jack@...e.cz>,
Zi Yan <ziy@...dia.com>, Matthew Brost <matthew.brost@...el.com>,
Joshua Hahn <joshua.hahnjy@...il.com>, Rakie Kim <rakie.kim@...com>,
Byungchul Park <byungchul@...com>, Gregory Price <gourry@...rry.net>,
Ying Huang <ying.huang@...ux.alibaba.com>,
Alistair Popple <apopple@...dia.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Vlastimil Babka <vbabka@...e.cz>, Mike Rapoport <rppt@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>, Michal Hocko <mhocko@...e.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Minchan Kim <minchan@...nel.org>,
Sergey Senozhatsky <senozhatsky@...omium.org>,
Brendan Jackman <jackmanb@...gle.com>,
Johannes Weiner <hannes@...xchg.org>, Jason Gunthorpe <jgg@...pe.ca>,
John Hubbard <jhubbard@...dia.com>, Peter Xu <peterx@...hat.com>,
Xu Xin <xu.xin16@....com.cn>,
Chengming Zhou <chengming.zhou@...ux.dev>,
Miaohe Lin <linmiaohe@...wei.com>,
Naoya Horiguchi <nao.horiguchi@...il.com>,
Oscar Salvador <osalvador@...e.de>, Rik van Riel <riel@...riel.com>,
Harry Yoo <harry.yoo@...cle.com>,
Qi Zheng <zhengqi.arch@...edance.com>,
Shakeel Butt <shakeel.butt@...ux.dev>
Subject: Re: [PATCH v1 18/29] mm: remove __folio_test_movable()
On Mon, Jun 30, 2025 at 02:59:59PM +0200, David Hildenbrand wrote:
> Convert to page_has_movable_ops(). While at it, cleanup relevant code
> a bit.
>
> The data_race() in migrate_folio_unmap() is questionable: we already
> hold a page reference, and concurrent modifications can no longer
> happen (iow: __ClearPageMovable() no longer exists). Drop it for now,
> we'll rework page_has_movable_ops() soon either way to no longer
> rely on page->mapping.
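Right, and just to spell out the line being dropped, it was essentially:

	bool is_lru = data_race(!__folio_test_movable(src));

IIUC, since we already hold a reference at that point, src->mapping can no
longer be modified concurrently, so the plain page_has_movable_ops(&src->page)
read in the hunk below seems fine to me.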
>
> Wherever we cast from folio to page now is a clear sign that this
> code has to be decoupled.
>
> Reviewed-by: Zi Yan <ziy@...dia.com>
> Signed-off-by: David Hildenbrand <david@...hat.com>
LGTM, so:
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
> ---
> include/linux/page-flags.h | 6 ------
> mm/migrate.c | 43 ++++++++++++--------------------------
> mm/vmscan.c | 6 ++++--
> 3 files changed, 17 insertions(+), 38 deletions(-)
>
> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
> index c67163b73c5ec..4c27ebb689e3c 100644
> --- a/include/linux/page-flags.h
> +++ b/include/linux/page-flags.h
> @@ -744,12 +744,6 @@ static __always_inline bool PageAnon(const struct page *page)
> return folio_test_anon(page_folio(page));
> }
>
> -static __always_inline bool __folio_test_movable(const struct folio *folio)
> -{
> - return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
> - PAGE_MAPPING_MOVABLE;
> -}
> -
Woah, wait, does this mean we can remove PAGE_MAPPING_MOVABLE??
Nice!
> static __always_inline bool page_has_movable_ops(const struct page *page)
> {
> return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 587af35b7390d..15d3c1031530c 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -219,12 +219,7 @@ void putback_movable_pages(struct list_head *l)
> continue;
> }
> list_del(&folio->lru);
> - /*
> - * We isolated non-lru movable folio so here we can use
> - * __folio_test_movable because LRU folio's mapping cannot
> - * have PAGE_MAPPING_MOVABLE.
> - */
I really do hate these references to 'LRU' when what's actually meant is
'pages that could be on the LRU'.
> - if (unlikely(__folio_test_movable(folio))) {
> + if (unlikely(page_has_movable_ops(&folio->page))) {
> putback_movable_ops_page(&folio->page);
> } else {
> node_stat_mod_folio(folio, NR_ISOLATED_ANON +
> @@ -237,26 +232,20 @@ void putback_movable_pages(struct list_head *l)
> /* Must be called with an elevated refcount on the non-hugetlb folio */
> bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
> {
> - bool isolated, lru;
> -
> if (folio_test_hugetlb(folio))
> return folio_isolate_hugetlb(folio, list);
>
> - lru = !__folio_test_movable(folio);
> - if (lru)
> - isolated = folio_isolate_lru(folio);
> - else
> - isolated = isolate_movable_ops_page(&folio->page,
> - ISOLATE_UNEVICTABLE);
> -
> - if (!isolated)
> - return false;
> -
> - list_add(&folio->lru, list);
> - if (lru)
> + if (page_has_movable_ops(&folio->page)) {
> + if (!isolate_movable_ops_page(&folio->page,
> + ISOLATE_UNEVICTABLE))
> + return false;
> + } else {
> + if (!folio_isolate_lru(folio))
> + return false;
> node_stat_add_folio(folio, NR_ISOLATED_ANON +
> folio_is_file_lru(folio));
> -
> + }
> + list_add(&folio->lru, list);
> return true;
> }
>
> @@ -1140,12 +1129,7 @@ static void migrate_folio_undo_dst(struct folio *dst, bool locked,
> static void migrate_folio_done(struct folio *src,
> enum migrate_reason reason)
> {
> - /*
> - * Compaction can migrate also non-LRU pages which are
> - * not accounted to NR_ISOLATED_*. They can be recognized
> - * as __folio_test_movable
> - */
> - if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION)
> + if (likely(!page_has_movable_ops(&src->page)) && reason != MR_DEMOTION)
> mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
> folio_is_file_lru(src), -folio_nr_pages(src));
>
> @@ -1164,7 +1148,6 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
> int rc = -EAGAIN;
> int old_page_state = 0;
> struct anon_vma *anon_vma = NULL;
> - bool is_lru = data_race(!__folio_test_movable(src));
> bool locked = false;
> bool dst_locked = false;
>
> @@ -1265,7 +1248,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
> goto out;
> dst_locked = true;
>
> - if (unlikely(!is_lru)) {
> + if (unlikely(page_has_movable_ops(&src->page))) {
> __migrate_folio_record(dst, old_page_state, anon_vma);
> return MIGRATEPAGE_UNMAP;
> }
> @@ -1330,7 +1313,7 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
> prev = dst->lru.prev;
> list_del(&dst->lru);
>
> - if (unlikely(__folio_test_movable(src))) {
> + if (unlikely(page_has_movable_ops(&src->page))) {
> rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
> if (rc)
> goto out;
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 098bcc821fc74..103dfc729a823 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1658,9 +1658,11 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
> unsigned int noreclaim_flag;
>
> list_for_each_entry_safe(folio, next, folio_list, lru) {
> + /* TODO: these pages should not even appear in this list. */
> + if (page_has_movable_ops(&folio->page))
VM_WARN_ON_ONCE()?
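i.e. perhaps something like (untested sketch):

	/* TODO: these pages should not even appear in this list. */
	if (page_has_movable_ops(&folio->page)) {
		VM_WARN_ON_ONCE(1);
		continue;
	}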
> + continue;
> if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
> - !folio_test_dirty(folio) && !__folio_test_movable(folio) &&
> - !folio_test_unevictable(folio)) {
> + !folio_test_dirty(folio) && !folio_test_unevictable(folio)) {
> folio_clear_active(folio);
> list_move(&folio->lru, &clean_folios);
> }
> --
> 2.49.0
>