Message-ID: <20250923174752.35701-2-shivankg@amd.com>
Date: Tue, 23 Sep 2025 17:47:36 +0000
From: Shivank Garg <shivankg@....com>
To: <akpm@...ux-foundation.org>, <david@...hat.com>
CC: <ziy@...dia.com>, <willy@...radead.org>, <matthew.brost@...el.com>,
<joshua.hahnjy@...il.com>, <rakie.kim@...com>, <byungchul@...com>,
<gourry@...rry.net>, <ying.huang@...ux.alibaba.com>, <apopple@...dia.com>,
<lorenzo.stoakes@...cle.com>, <Liam.Howlett@...cle.com>, <vbabka@...e.cz>,
<rppt@...nel.org>, <surenb@...gle.com>, <mhocko@...e.com>,
<vkoul@...nel.org>, <lucas.demarchi@...el.com>, <rdunlap@...radead.org>,
<jgg@...pe.ca>, <kuba@...nel.org>, <justonli@...omium.org>,
<ivecera@...hat.com>, <dave.jiang@...el.com>, <Jonathan.Cameron@...wei.com>,
<dan.j.williams@...el.com>, <rientjes@...gle.com>,
<Raghavendra.KodsaraThimmappa@....com>, <bharata@....com>,
<shivankg@....com>, <alirad.malek@...corp.com>, <yiannis@...corp.com>,
<weixugc@...gle.com>, <linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>
Subject: [RFC V3 1/9] mm/migrate: factor out code in move_to_new_folio() and migrate_folio_move()
From: Zi Yan <ziy@...dia.com>
No functional change is intended. The factored-out code will be reused by
an upcoming batched folio move function.
Signed-off-by: Zi Yan <ziy@...dia.com>
Signed-off-by: Shivank Garg <shivankg@....com>
---
mm/migrate.c | 106 ++++++++++++++++++++++++++++++++-------------------
1 file changed, 67 insertions(+), 39 deletions(-)
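For illustration only (not part of this patch): a hypothetical batched
caller could reuse the two factored-out helpers roughly as sketched below,
issuing all the prep steps first and then finalizing each folio. The name
move_to_new_folios_batched() and the src[]/dst[]/rc[] bookkeeping arrays are
invented for this sketch; the real batched implementation is introduced
later in this series.

/*
 * Sketch only: batch-move nr folios. Each src[i]/dst[i] pair must already
 * be locked and src[i] unmapped, as move_to_new_folio() requires.
 */
static void move_to_new_folios_batched(struct folio **dst, struct folio **src,
				       int *rc, int nr, enum migrate_mode mode)
{
	int i;

	/* Step 1: perform each copy via the mapping's migration callback. */
	for (i = 0; i < nr; i++)
		rc[i] = _move_to_new_folio_prep(dst[i], src[i], mode);

	/*
	 * Step 2: for successful moves, clear src->mapping (pagecache only)
	 * and flush the dcache of the destination folio.
	 */
	for (i = 0; i < nr; i++)
		_move_to_new_folio_finalize(dst[i], src[i], rc[i]);
}
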
diff --git a/mm/migrate.c b/mm/migrate.c
index 9e5ef39ce73a..ad03e7257847 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1061,19 +1061,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
return migrate_folio(mapping, dst, src, mode);
}
-/*
- * Move a src folio to a newly allocated dst folio.
- *
- * The src and dst folios are locked and the src folios was unmapped from
- * the page tables.
- *
- * On success, the src folio was replaced by the dst folio.
- *
- * Return value:
- * < 0 - error code
- * MIGRATEPAGE_SUCCESS - success
- */
-static int move_to_new_folio(struct folio *dst, struct folio *src,
+static int _move_to_new_folio_prep(struct folio *dst, struct folio *src,
enum migrate_mode mode)
{
struct address_space *mapping = folio_mapping(src);
@@ -1098,7 +1086,12 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
mode);
else
rc = fallback_migrate_folio(mapping, dst, src, mode);
+ return rc;
+}
+static void _move_to_new_folio_finalize(struct folio *dst, struct folio *src,
+ int rc)
+{
if (rc == MIGRATEPAGE_SUCCESS) {
/*
* For pagecache folios, src->mapping must be cleared before src
@@ -1110,6 +1103,29 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
if (likely(!folio_is_zone_device(dst)))
flush_dcache_folio(dst);
}
+}
+
+/*
+ * Move a src folio to a newly allocated dst folio.
+ *
+ * The src and dst folios are locked and the src folio was unmapped from
+ * the page tables.
+ *
+ * On success, the src folio was replaced by the dst folio.
+ *
+ * Return value:
+ * < 0 - error code
+ * MIGRATEPAGE_SUCCESS - success
+ */
+static int move_to_new_folio(struct folio *dst, struct folio *src,
+ enum migrate_mode mode)
+{
+ int rc;
+
+ rc = _move_to_new_folio_prep(dst, src, mode);
+
+ _move_to_new_folio_finalize(dst, src, rc);
+
return rc;
}
@@ -1345,32 +1361,9 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
return rc;
}
-/* Migrate the folio to the newly allocated folio in dst. */
-static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
- struct folio *src, struct folio *dst,
- enum migrate_mode mode, enum migrate_reason reason,
- struct list_head *ret)
+static void _migrate_folio_move_finalize1(struct folio *src, struct folio *dst,
+ int old_page_state)
{
- int rc;
- int old_page_state = 0;
- struct anon_vma *anon_vma = NULL;
- struct list_head *prev;
-
- __migrate_folio_extract(dst, &old_page_state, &anon_vma);
- prev = dst->lru.prev;
- list_del(&dst->lru);
-
- if (unlikely(page_has_movable_ops(&src->page))) {
- rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
- if (rc)
- goto out;
- goto out_unlock_both;
- }
-
- rc = move_to_new_folio(dst, src, mode);
- if (rc)
- goto out;
-
/*
* When successful, push dst to LRU immediately: so that if it
* turns out to be an mlocked page, remove_migration_ptes() will
@@ -1386,8 +1379,12 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
if (old_page_state & PAGE_WAS_MAPPED)
remove_migration_ptes(src, dst, 0);
+}
-out_unlock_both:
+static void _migrate_folio_move_finalize2(struct folio *src, struct folio *dst,
+ enum migrate_reason reason,
+ struct anon_vma *anon_vma)
+{
folio_unlock(dst);
folio_set_owner_migrate_reason(dst, reason);
/*
@@ -1407,6 +1404,37 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
put_anon_vma(anon_vma);
folio_unlock(src);
migrate_folio_done(src, reason);
+}
+
+/* Migrate the folio to the newly allocated folio in dst. */
+static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
+ struct folio *src, struct folio *dst,
+ enum migrate_mode mode, enum migrate_reason reason,
+ struct list_head *ret)
+{
+ int rc;
+ int old_page_state = 0;
+ struct anon_vma *anon_vma = NULL;
+ struct list_head *prev;
+
+ __migrate_folio_extract(dst, &old_page_state, &anon_vma);
+ prev = dst->lru.prev;
+ list_del(&dst->lru);
+
+ if (unlikely(page_has_movable_ops(&src->page))) {
+ rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
+ if (rc)
+ goto out;
+ goto out_unlock_both;
+ }
+
+ rc = move_to_new_folio(dst, src, mode);
+ if (rc)
+ goto out;
+
+ _migrate_folio_move_finalize1(src, dst, old_page_state);
+out_unlock_both:
+ _migrate_folio_move_finalize2(src, dst, reason, anon_vma);
return rc;
out:
--
2.43.0