Message-ID: <20250618174014.1168640-8-david@redhat.com>
Date: Wed, 18 Jun 2025 19:39:50 +0200
From: David Hildenbrand <david@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org,
linux-doc@...r.kernel.org,
linuxppc-dev@...ts.ozlabs.org,
virtualization@...ts.linux.dev,
linux-fsdevel@...r.kernel.org,
David Hildenbrand <david@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Jonathan Corbet <corbet@....net>,
Madhavan Srinivasan <maddy@...ux.ibm.com>,
Michael Ellerman <mpe@...erman.id.au>,
Nicholas Piggin <npiggin@...il.com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Jerrin Shaji George <jerrin.shaji-george@...adcom.com>,
Arnd Bergmann <arnd@...db.de>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
Eugenio Pérez <eperezma@...hat.com>,
Alexander Viro <viro@...iv.linux.org.uk>,
Christian Brauner <brauner@...nel.org>,
Jan Kara <jack@...e.cz>,
Zi Yan <ziy@...dia.com>,
Matthew Brost <matthew.brost@...el.com>,
Joshua Hahn <joshua.hahnjy@...il.com>,
Rakie Kim <rakie.kim@...com>,
Byungchul Park <byungchul@...com>,
Gregory Price <gourry@...rry.net>,
Ying Huang <ying.huang@...ux.alibaba.com>,
Alistair Popple <apopple@...dia.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Vlastimil Babka <vbabka@...e.cz>,
Mike Rapoport <rppt@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>,
Michal Hocko <mhocko@...e.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Minchan Kim <minchan@...nel.org>,
Sergey Senozhatsky <senozhatsky@...omium.org>,
Brendan Jackman <jackmanb@...gle.com>,
Johannes Weiner <hannes@...xchg.org>,
Jason Gunthorpe <jgg@...pe.ca>,
John Hubbard <jhubbard@...dia.com>,
Peter Xu <peterx@...hat.com>,
Xu Xin <xu.xin16@....com.cn>,
Chengming Zhou <chengming.zhou@...ux.dev>,
Miaohe Lin <linmiaohe@...wei.com>,
Naoya Horiguchi <nao.horiguchi@...il.com>,
Oscar Salvador <osalvador@...e.de>,
Rik van Riel <riel@...riel.com>,
Harry Yoo <harry.yoo@...cle.com>,
Qi Zheng <zhengqi.arch@...edance.com>,
Shakeel Butt <shakeel.butt@...ux.dev>
Subject: [PATCH RFC 07/29] mm/migrate: rename isolate_movable_page() to isolate_movable_ops_page()
... and start moving back to per-page things that will absolutely not be
folio things in the future. Add documentation and a comment that the
remaining folio stuff (lock, refcount) will have to be reworked as well.

While at it, convert the VM_BUG_ON() into a WARN_ON_ONCE() and handle
it gracefully (relevant with further changes), and convert a
WARN_ON_ONCE() into a VM_WARN_ON_ONCE_PAGE().
Signed-off-by: David Hildenbrand <david@...hat.com>
---
 include/linux/migrate.h |  4 ++--
 mm/compaction.c         |  2 +-
 mm/migrate.c            | 39 +++++++++++++++++++++++++++++----------
 3 files changed, 32 insertions(+), 13 deletions(-)

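Note (illustrative sketch only, not part of the diff): a caller of the
renamed helper would look roughly like the compaction path touched
below. It isolates the page via its movable_ops and then tracks it for
migration. The function name and the migratepages list are made up for
the sketch; usual mm headers and kernel context are assumed.

	static bool sketch_isolate_for_migration(struct page *page,
						 struct list_head *migratepages)
	{
		/*
		 * Fails if the page has no movable_ops, is already isolated
		 * for migration, or was just released by its owner.
		 */
		if (!isolate_movable_ops_page(page, ISOLATE_UNEVICTABLE))
			return false;

		/*
		 * For now the isolated page is still tracked as a folio on
		 * the caller's migration list; this is part of the folio
		 * dependency that will have to be reworked later.
		 */
		list_add(&page_folio(page)->lru, migratepages);
		return true;
	}
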
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index aaa2114498d6d..c0ec7422837bd 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -69,7 +69,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
unsigned long private, enum migrate_mode mode, int reason,
unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
-bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode);
bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
int migrate_huge_page_move_mapping(struct address_space *mapping,
@@ -90,7 +90,7 @@ static inline int migrate_pages(struct list_head *l, new_folio_t new,
static inline struct folio *alloc_migration_target(struct folio *src,
unsigned long private)
{ return NULL; }
-static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+static inline bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
{ return false; }
static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{ return false; }
diff --git a/mm/compaction.c b/mm/compaction.c
index 3925cb61dbb8f..17455c5a4be05 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1093,7 +1093,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
locked = NULL;
}
- if (isolate_movable_page(page, mode)) {
+ if (isolate_movable_ops_page(page, mode)) {
folio = page_folio(page);
goto isolate_success;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index ea8c74d996592..6bbb455f8b593 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -51,8 +51,26 @@
#include "internal.h"
#include "swap.h"
-bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+/**
+ * isolate_movable_ops_page - isolate a movable_ops page for migration
+ * @page: The page.
+ * @mode: The isolation mode.
+ *
+ * Try to isolate a movable_ops page for migration. Will fail if the page is
+ * not a movable_ops page, if the page is already isolated for migration
+ * or if the page was just released by its owner.
+ *
+ * Once isolated, the page cannot get freed until it is either putback
+ * or migrated.
+ *
+ * Returns true if isolation succeeded, otherwise false.
+ */
+bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
{
+ /*
+ * TODO: these pages will not be folios in the future. All
+ * folio dependencies will have to be removed.
+ */
struct folio *folio = folio_get_nontail_page(page);
const struct movable_operations *mops;
@@ -73,7 +91,7 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
* we use non-atomic bitops on newly allocated page flags so
* unconditionally grabbing the lock ruins page's owner side.
*/
- if (unlikely(!__folio_test_movable(folio)))
+ if (unlikely(!__PageMovable(page)))
goto out_putfolio;
/*
@@ -90,18 +108,19 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
if (unlikely(!folio_trylock(folio)))
goto out_putfolio;
- if (!folio_test_movable(folio) || folio_test_isolated(folio))
+ if (!PageMovable(page) || PageIsolated(page))
goto out_no_isolated;
- mops = folio_movable_ops(folio);
- VM_BUG_ON_FOLIO(!mops, folio);
+ mops = page_movable_ops(page);
+ if (WARN_ON_ONCE(!mops))
+ goto out_no_isolated;
- if (!mops->isolate_page(&folio->page, mode))
+ if (!mops->isolate_page(page, mode))
goto out_no_isolated;
/* Driver shouldn't use the isolated flag */
- WARN_ON_ONCE(folio_test_isolated(folio));
- folio_set_isolated(folio);
+ VM_WARN_ON_ONCE_PAGE(PageIsolated(page), page);
+ SetPageIsolated(page);
folio_unlock(folio);
return true;
@@ -175,8 +194,8 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
if (lru)
isolated = folio_isolate_lru(folio);
else
- isolated = isolate_movable_page(&folio->page,
- ISOLATE_UNEVICTABLE);
+ isolated = isolate_movable_ops_page(&folio->page,
+ ISOLATE_UNEVICTABLE);
if (!isolated)
return false;
--
2.49.0