Message-Id: <20251231043154.42931-4-jniethe@nvidia.com>
Date: Wed, 31 Dec 2025 15:31:49 +1100
From: Jordan Niethe <jniethe@...dia.com>
To: linux-mm@...ck.org
Cc: balbirs@...dia.com,
matthew.brost@...el.com,
akpm@...ux-foundation.org,
linux-kernel@...r.kernel.org,
dri-devel@...ts.freedesktop.org,
david@...hat.com,
ziy@...dia.com,
apopple@...dia.com,
lorenzo.stoakes@...cle.com,
lyude@...hat.com,
dakr@...nel.org,
airlied@...il.com,
simona@...ll.ch,
rcampbell@...dia.com,
mpenttil@...hat.com,
jgg@...dia.com,
willy@...radead.org,
linuxppc-dev@...ts.ozlabs.org,
intel-xe@...ts.freedesktop.org,
jgg@...pe.ca,
Felix.Kuehling@....com
Subject: [PATCH v1 3/8] mm: Add helpers to create migration entries from struct pages

To create a new migration entry for a given struct page, the page is
first converted to its pfn, and that pfn is then passed to
make_readable_migration_entry() (and friends).
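
For example, the calling convention at the sites updated below looks,
in sketch form, like:

	/* pfn-based convention: the caller converts the page first */
	swp_entry_t entry = make_writable_migration_entry(page_to_pfn(page));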

A future change will remove device private pages from the physical
address space. This will mean that device private pages will no longer
have a pfn and must be handled separately.

Prepare for this with a new set of helpers:
- make_readable_migration_entry_from_page()
- make_readable_exclusive_migration_entry_from_page()
- make_writable_migration_entry_from_page()
These helpers take a struct page as a parameter instead of a pfn,
allowing the swap offset field to be derived differently for device
private pages, as sketched below.
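
As an illustration only (not part of this patch), a future version of
one of these helpers could branch on the page type when computing the
swap offset. device_private_swp_offset() here is a hypothetical helper
that does not exist in the tree:

	static inline swp_entry_t
	make_readable_migration_entry_from_page(struct page *page)
	{
		/*
		 * Hypothetical future shape: device private pages would
		 * encode a device-specific offset rather than a pfn.
		 */
		if (is_device_private_page(page))
			return swp_entry(SWP_MIGRATION_READ,
					 device_private_swp_offset(page));

		return swp_entry(SWP_MIGRATION_READ, page_to_pfn(page));
	}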
Signed-off-by: Jordan Niethe <jniethe@...dia.com>
---
v1:
- New to series
---
 include/linux/swapops.h | 25 +++++++++++++++++++++++++
 mm/huge_memory.c        | 18 +++++++++---------
 mm/migrate_device.c     | 12 ++++++------
 mm/rmap.c               | 12 ++++++------
 4 files changed, 46 insertions(+), 21 deletions(-)

diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 8cfc966eae48..72aa636fdb48 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -173,16 +173,31 @@ static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
return swp_entry(SWP_MIGRATION_READ, offset);
}
+static inline swp_entry_t make_readable_migration_entry_from_page(struct page *page)
+{
+ return swp_entry(SWP_MIGRATION_READ, page_to_pfn(page));
+}
+
static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}
+static inline swp_entry_t make_readable_exclusive_migration_entry_from_page(struct page *page)
+{
+ return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, page_to_pfn(page));
+}
+
static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
return swp_entry(SWP_MIGRATION_WRITE, offset);
}
+static inline swp_entry_t make_writable_migration_entry_from_page(struct page *page)
+{
+ return swp_entry(SWP_MIGRATION_WRITE, page_to_pfn(page));
+}
+
/*
* Returns whether the host has large enough swap offset field to support
* carrying over pgtable A/D bits for page migrations. The result is
@@ -222,11 +237,21 @@ static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
return swp_entry(0, 0);
}
+static inline swp_entry_t make_readable_migration_entry_from_page(struct page *page)
+{
+ return swp_entry(0, 0);
+}
+
static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
+static inline swp_entry_t make_readable_exclusive_migration_entry_from_page(struct page *page)
+{
+ return swp_entry(0, 0);
+}
+
static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
return swp_entry(0, 0);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40cf59301c21..08c68e2e3f06 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3183,14 +3183,14 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
if (write)
- swp_entry = make_writable_migration_entry(
- page_to_pfn(page + i));
+ swp_entry = make_writable_migration_entry_from_page(
+ page + i);
else if (anon_exclusive)
- swp_entry = make_readable_exclusive_migration_entry(
- page_to_pfn(page + i));
+ swp_entry = make_readable_exclusive_migration_entry_from_page(
+ page + i);
else
- swp_entry = make_readable_migration_entry(
- page_to_pfn(page + i));
+ swp_entry = make_readable_migration_entry_from_page(
+ page + i);
if (young)
swp_entry = make_migration_entry_young(swp_entry);
if (dirty)
@@ -4890,11 +4890,11 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
if (pmd_dirty(pmdval))
folio_mark_dirty(folio);
if (pmd_write(pmdval))
- entry = make_writable_migration_entry(page_to_pfn(page));
+ entry = make_writable_migration_entry_from_page(page);
else if (anon_exclusive)
- entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
+ entry = make_readable_exclusive_migration_entry_from_page(page);
else
- entry = make_readable_migration_entry(page_to_pfn(page));
+ entry = make_readable_migration_entry_from_page(page);
if (pmd_young(pmdval))
entry = make_migration_entry_young(entry);
if (pmd_dirty(pmdval))
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 5d108ddf1a97..7eef21d63364 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -438,14 +438,14 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
/* Setup special migration page table entry */
if (mpfn & MIGRATE_PFN_WRITE)
- entry = make_writable_migration_entry(
- page_to_pfn(page));
+ entry = make_writable_migration_entry_from_page(
+ page);
else if (anon_exclusive)
- entry = make_readable_exclusive_migration_entry(
- page_to_pfn(page));
+ entry = make_readable_exclusive_migration_entry_from_page(
+ page);
else
- entry = make_readable_migration_entry(
- page_to_pfn(page));
+ entry = make_readable_migration_entry_from_page(
+ page);
if (pte_present(pte)) {
if (pte_young(pte))
entry = make_migration_entry_young(entry);
diff --git a/mm/rmap.c b/mm/rmap.c
index 79a2478b4aa9..bb881b0c4b06 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2539,14 +2539,14 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
* pte is removed and then restart fault handling.
*/
if (writable)
- entry = make_writable_migration_entry(
- page_to_pfn(subpage));
+ entry = make_writable_migration_entry_from_page(
+ subpage);
else if (anon_exclusive)
- entry = make_readable_exclusive_migration_entry(
- page_to_pfn(subpage));
+ entry = make_readable_exclusive_migration_entry_from_page(
+ subpage);
else
- entry = make_readable_migration_entry(
- page_to_pfn(subpage));
+ entry = make_readable_migration_entry_from_page(
+ subpage);
if (likely(pte_present(pteval))) {
if (pte_young(pteval))
entry = make_migration_entry_young(entry);
--
2.34.1