Message-ID: <20250903011900.3657435-6-balbirs@nvidia.com>
Date: Wed, 3 Sep 2025 11:18:50 +1000
From: Balbir Singh <balbirs@...dia.com>
To: linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Cc: damon@...ts.linux.dev,
dri-devel@...ts.freedesktop.org,
Balbir Singh <balbirs@...dia.com>,
Andrew Morton <akpm@...ux-foundation.org>,
David Hildenbrand <david@...hat.com>,
Zi Yan <ziy@...dia.com>,
Joshua Hahn <joshua.hahnjy@...il.com>,
Rakie Kim <rakie.kim@...com>,
Byungchul Park <byungchul@...com>,
Gregory Price <gourry@...rry.net>,
Ying Huang <ying.huang@...ux.alibaba.com>,
Alistair Popple <apopple@...dia.com>,
Oscar Salvador <osalvador@...e.de>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Nico Pache <npache@...hat.com>,
Ryan Roberts <ryan.roberts@....com>,
Dev Jain <dev.jain@....com>,
Barry Song <baohua@...nel.org>,
Lyude Paul <lyude@...hat.com>,
Danilo Krummrich <dakr@...nel.org>,
David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>,
Ralph Campbell <rcampbell@...dia.com>,
Mika Penttilä <mpenttil@...hat.com>,
Matthew Brost <matthew.brost@...el.com>,
Francois Dugast <francois.dugast@...el.com>
Subject: [v4 05/15] mm/migrate_device: handle partially mapped folios during collection
Extend migrate_vma_collect_pmd() to handle partially mapped large
folios that require splitting before migration can proceed.

During the PTE walk in the collection phase, if a large folio is only
partially mapped in the migration range, it must be split to ensure
the folio is migrated correctly.
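
For context, such a partially mapped large folio typically arises when
userspace unmaps or zaps part of a THP-backed range. A minimal,
hypothetical userspace sketch of one way to reach that state (assumes a
2MB PMD-sized THP and THP enabled; not part of this patch):

  #include <stdlib.h>
  #include <string.h>
  #include <sys/mman.h>

  int main(void)
  {
          size_t thp_size = 2UL << 20;    /* assumed 2MB PMD THP size */
          char *buf;

          /* 2MB-aligned anonymous region, eligible for THP backing */
          if (posix_memalign((void **)&buf, thp_size, thp_size))
                  return 1;
          madvise(buf, thp_size, MADV_HUGEPAGE);
          memset(buf, 1, thp_size);       /* fault in, ideally as one large folio */

          /*
           * Zap one base page in the middle: the folio stays large
           * but is now only partially mapped, the state the
           * collection phase must split before migration can proceed.
           */
          madvise(buf + 4096, 4096, MADV_DONTNEED);
          return 0;
  }

After the partial MADV_DONTNEED the folio remains large but is mapped
only by individual PTEs, which is exactly the case the collection code
below now splits.
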
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: David Hildenbrand <david@...hat.com>
Cc: Zi Yan <ziy@...dia.com>
Cc: Joshua Hahn <joshua.hahnjy@...il.com>
Cc: Rakie Kim <rakie.kim@...com>
Cc: Byungchul Park <byungchul@...com>
Cc: Gregory Price <gourry@...rry.net>
Cc: Ying Huang <ying.huang@...ux.alibaba.com>
Cc: Alistair Popple <apopple@...dia.com>
Cc: Oscar Salvador <osalvador@...e.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
Cc: Baolin Wang <baolin.wang@...ux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@...cle.com>
Cc: Nico Pache <npache@...hat.com>
Cc: Ryan Roberts <ryan.roberts@....com>
Cc: Dev Jain <dev.jain@....com>
Cc: Barry Song <baohua@...nel.org>
Cc: Lyude Paul <lyude@...hat.com>
Cc: Danilo Krummrich <dakr@...nel.org>
Cc: David Airlie <airlied@...il.com>
Cc: Simona Vetter <simona@...ll.ch>
Cc: Ralph Campbell <rcampbell@...dia.com>
Cc: Mika Penttilä <mpenttil@...hat.com>
Cc: Matthew Brost <matthew.brost@...el.com>
Cc: Francois Dugast <francois.dugast@...el.com>
Signed-off-by: Balbir Singh <balbirs@...dia.com>
---
mm/migrate_device.c | 95 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 95 insertions(+)
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index e05e14d6eacd..e58c3f9d01c8 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -54,6 +54,54 @@ static int migrate_vma_collect_hole(unsigned long start,
 	return 0;
 }
 
+/**
+ * migrate_vma_split_folio() - helper to split a THP or mTHP folio
+ *
+ * @folio: the folio to split
+ * @fault_page: struct page associated with the fault, if any
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int migrate_vma_split_folio(struct folio *folio,
+				   struct page *fault_page)
+{
+	int ret;
+	struct folio *fault_folio = fault_page ? page_folio(fault_page) : NULL;
+	struct folio *new_fault_folio = NULL;
+
+	if (folio != fault_folio) {
+		folio_get(folio);
+		folio_lock(folio);
+	}
+
+	ret = split_folio(folio);
+	if (ret) {
+		if (folio != fault_folio) {
+			folio_unlock(folio);
+			folio_put(folio);
+		}
+		return ret;
+	}
+
+	new_fault_folio = fault_page ? page_folio(fault_page) : NULL;
+
+	/*
+	 * Ensure the lock is held on the correct
+	 * folio after the split
+	 */
+	if (!new_fault_folio) {
+		folio_unlock(folio);
+		folio_put(folio);
+	} else if (folio != new_fault_folio) {
+		folio_get(new_fault_folio);
+		folio_lock(new_fault_folio);
+		folio_unlock(folio);
+		folio_put(folio);
+	}
+
+	return 0;
+}
+
 static int migrate_vma_collect_pmd(pmd_t *pmdp,
 				   unsigned long start,
 				   unsigned long end,
@@ -136,6 +184,8 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 			 * page table entry. Other special swap entries are not
 			 * migratable, and we ignore regular swapped page.
 			 */
+			struct folio *folio;
+
 			entry = pte_to_swp_entry(pte);
 			if (!is_device_private_entry(entry))
 				goto next;
@@ -147,6 +197,29 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 			    pgmap->owner != migrate->pgmap_owner)
 				goto next;
 
+			folio = page_folio(page);
+			if (folio_test_large(folio)) {
+				int ret;
+
+				/*
+				 * A large folio that is still mapped at the
+				 * pte level (pmd present) indicates a partial
+				 * unmap. Split the folio now for the migration
+				 * to be handled correctly.
+				 */
+				pte_unmap_unlock(ptep, ptl);
+				ret = migrate_vma_split_folio(folio,
+						migrate->fault_page);
+
+				if (ret) {
+					ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+					goto next;
+				}
+
+				addr = start;
+				goto again;
+			}
+
 			mpfn = migrate_pfn(page_to_pfn(page)) |
 					MIGRATE_PFN_MIGRATE;
 			if (is_writable_device_private_entry(entry))
@@ -171,6 +244,28 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 				    pgmap->owner != migrate->pgmap_owner)
 					goto next;
 			}
+			folio = page_folio(page);
+			if (folio_test_large(folio)) {
+				int ret;
+
+				/*
+				 * A large folio that is still mapped at the
+				 * pte level (pmd present) indicates a partial
+				 * unmap. Split the folio now for the migration
+				 * to be handled correctly.
+				 */
+				pte_unmap_unlock(ptep, ptl);
+				ret = migrate_vma_split_folio(folio,
+						migrate->fault_page);
+
+				if (ret) {
+					ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+					goto next;
+				}
+
+				addr = start;
+				goto again;
+			}
 			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
 			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
 		}
--
2.50.1