Message-ID: <6bcfda48cf63eefee73671798a43e4b9aed23cc0.1762621568.git.lorenzo.stoakes@oracle.com>
Date: Sat, 8 Nov 2025 17:08:29 +0000
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Christian Borntraeger <borntraeger@...ux.ibm.com>,
Janosch Frank <frankja@...ux.ibm.com>,
Claudio Imbrenda <imbrenda@...ux.ibm.com>,
David Hildenbrand <david@...hat.com>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Gerald Schaefer <gerald.schaefer@...ux.ibm.com>,
Heiko Carstens <hca@...ux.ibm.com>, Vasily Gorbik <gor@...ux.ibm.com>,
Sven Schnelle <svens@...ux.ibm.com>, Peter Xu <peterx@...hat.com>,
Alexander Viro <viro@...iv.linux.org.uk>,
Christian Brauner <brauner@...nel.org>, Jan Kara <jack@...e.cz>,
Arnd Bergmann <arnd@...db.de>, Zi Yan <ziy@...dia.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
"Liam R . Howlett" <Liam.Howlett@...cle.com>,
Nico Pache <npache@...hat.com>, Ryan Roberts <ryan.roberts@....com>,
Dev Jain <dev.jain@....com>, Barry Song <baohua@...nel.org>,
Lance Yang <lance.yang@...ux.dev>, Muchun Song <muchun.song@...ux.dev>,
Oscar Salvador <osalvador@...e.de>, Vlastimil Babka <vbabka@...e.cz>,
Mike Rapoport <rppt@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>, Michal Hocko <mhocko@...e.com>,
Matthew Brost <matthew.brost@...el.com>,
Joshua Hahn <joshua.hahnjy@...il.com>, Rakie Kim <rakie.kim@...com>,
Byungchul Park <byungchul@...com>, Gregory Price <gourry@...rry.net>,
Ying Huang <ying.huang@...ux.alibaba.com>,
Alistair Popple <apopple@...dia.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Yuanchu Xie <yuanchu@...gle.com>, Wei Xu <weixugc@...gle.com>,
Kemeng Shi <shikemeng@...weicloud.com>,
Kairui Song <kasong@...cent.com>, Nhat Pham <nphamcs@...il.com>,
Baoquan He <bhe@...hat.com>, Chris Li <chrisl@...nel.org>,
SeongJae Park <sj@...nel.org>, Matthew Wilcox <willy@...radead.org>,
Jason Gunthorpe <jgg@...pe.ca>, Leon Romanovsky <leon@...nel.org>,
Xu Xin <xu.xin16@....com.cn>,
Chengming Zhou <chengming.zhou@...ux.dev>,
Jann Horn <jannh@...gle.com>, Miaohe Lin <linmiaohe@...wei.com>,
Naoya Horiguchi <nao.horiguchi@...il.com>,
Pedro Falcato <pfalcato@...e.de>,
Pasha Tatashin <pasha.tatashin@...een.com>,
Rik van Riel <riel@...riel.com>, Harry Yoo <harry.yoo@...cle.com>,
Hugh Dickins <hughd@...gle.com>, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org, linux-s390@...r.kernel.org,
linux-fsdevel@...r.kernel.org, linux-mm@...ck.org,
linux-arch@...r.kernel.org, damon@...ts.linux.dev
Subject: [PATCH v2 15/16] mm: eliminate further swapops predicates
Having converted the bulk of the code base to software leaf entries, we can
now mop up the remaining cases.
We replace is_pfn_swap_entry(), pfn_swap_entry_to_page(),
pfn_swap_entry_folio(), swp_offset_pfn(), is_device_private_entry(),
is_writable_device_private_entry(), is_device_exclusive_entry(),
is_migration_entry(), is_writable_migration_entry() and
is_readable_migration_entry() with their softleaf equivalents, and drop
is_readable_exclusive_migration_entry(), which no longer has any users.
The page-lock sanity checks previously performed via BUG_ON() in
pfn_swap_entry_to_page() and pfn_swap_entry_folio() are retained in
softleaf_to_page() and softleaf_to_folio(), downgraded to
VM_WARN_ON_ONCE().
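To illustrate the pattern applied throughout (a sketch only, lifted from
the ksm.c conversion below):
Before:
        swp_entry_t entry = pte_to_swp_entry(pte);

        if (is_migration_entry(entry))
                folio = pfn_swap_entry_folio(entry);
After:
        const softleaf_t entry = softleaf_from_pte(pte);

        if (softleaf_is_migration(entry))
                folio = softleaf_to_folio(entry);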
No functional change intended.
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
---
fs/proc/task_mmu.c | 14 ++---
include/linux/leafops.h | 25 +++++++--
include/linux/swapops.h | 121 +---------------------------------------
mm/debug_vm_pgtable.c | 20 +++----
mm/hmm.c | 2 +-
mm/hugetlb.c | 2 +-
mm/ksm.c | 6 +-
mm/memory-failure.c | 6 +-
mm/memory.c | 3 +-
mm/mempolicy.c | 4 +-
mm/migrate.c | 6 +-
mm/migrate_device.c | 10 ++--
mm/mprotect.c | 8 +--
mm/page_vma_mapped.c | 8 +--
mm/pagewalk.c | 7 +--
mm/rmap.c | 9 ++-
16 files changed, 75 insertions(+), 176 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 3cdefa7546db..4deded872c46 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1940,13 +1940,13 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
if (pte_uffd_wp(pte))
flags |= PM_UFFD_WP;
} else {
- swp_entry_t entry;
+ softleaf_t entry;
if (pte_swp_soft_dirty(pte))
flags |= PM_SOFT_DIRTY;
if (pte_swp_uffd_wp(pte))
flags |= PM_UFFD_WP;
- entry = pte_to_swp_entry(pte);
+ entry = softleaf_from_pte(pte);
if (pm->show_pfn) {
pgoff_t offset;
@@ -1954,16 +1954,16 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
* For PFN swap offsets, keeping the offset field
* to be PFN only to be compatible with old smaps.
*/
- if (is_pfn_swap_entry(entry))
- offset = swp_offset_pfn(entry);
+ if (softleaf_has_pfn(entry))
+ offset = softleaf_to_pfn(entry);
else
offset = swp_offset(entry);
frame = swp_type(entry) |
(offset << MAX_SWAPFILES_SHIFT);
}
flags |= PM_SWAP;
- if (is_pfn_swap_entry(entry))
- page = pfn_swap_entry_to_page(entry);
+ if (softleaf_has_pfn(entry))
+ page = softleaf_to_page(entry);
if (softleaf_is_uffd_wp_marker(entry))
flags |= PM_UFFD_WP;
if (softleaf_is_guard_marker(entry))
@@ -2032,7 +2032,7 @@ static int pagemap_pmd_range_thp(pmd_t *pmdp, unsigned long addr,
if (pmd_swp_uffd_wp(pmd))
flags |= PM_UFFD_WP;
VM_WARN_ON_ONCE(!pmd_is_migration_entry(pmd));
- page = pfn_swap_entry_to_page(entry);
+ page = softleaf_to_page(entry);
}
if (page) {
diff --git a/include/linux/leafops.h b/include/linux/leafops.h
index 9be9a4e8ada4..d593093ba70c 100644
--- a/include/linux/leafops.h
+++ b/include/linux/leafops.h
@@ -355,7 +355,7 @@ static inline unsigned long softleaf_to_pfn(softleaf_t entry)
VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
/* Temporary until swp_entry_t eliminated. */
- return swp_offset_pfn(entry);
+ return swp_offset(entry) & SWP_PFN_MASK;
}
/**
@@ -366,10 +366,16 @@ static inline unsigned long softleaf_to_pfn(softleaf_t entry)
*/
static inline struct page *softleaf_to_page(softleaf_t entry)
{
+ struct page *page = pfn_to_page(softleaf_to_pfn(entry));
+
VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding page is locked
+ */
+ VM_WARN_ON_ONCE(softleaf_is_migration(entry) && !PageLocked(page));
- /* Temporary until swp_entry_t eliminated. */
- return pfn_swap_entry_to_page(entry);
+ return page;
}
/**
@@ -381,10 +387,17 @@ static inline struct page *softleaf_to_page(softleaf_t entry)
*/
static inline struct folio *softleaf_to_folio(softleaf_t entry)
{
- VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+ struct folio *folio = pfn_folio(softleaf_to_pfn(entry));
- /* Temporary until swp_entry_t eliminated. */
- return pfn_swap_entry_folio(entry);
+ VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding folio is locked.
+ */
+ VM_WARN_ON_ONCE(softleaf_is_migration(entry) &&
+ !folio_test_locked(folio));
+
+ return folio;
}
/**
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index c8e6f927da48..3d02b288c15e 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -28,7 +28,7 @@
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
/*
- * Definitions only for PFN swap entries (see is_pfn_swap_entry()). To
+ * Definitions only for PFN swap entries (see softleaf_has_pfn()). To
* store PFN, we only need SWP_PFN_BITS bits. Each of the pfn swap entries
* can use the extra bits to store other information besides PFN.
*/
@@ -66,8 +66,6 @@
#define SWP_MIG_YOUNG BIT(SWP_MIG_YOUNG_BIT)
#define SWP_MIG_DIRTY BIT(SWP_MIG_DIRTY_BIT)
-static inline bool is_pfn_swap_entry(swp_entry_t entry);
-
/* Clear all flags but only keep swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
@@ -109,17 +107,6 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
return entry.val & SWP_OFFSET_MASK;
}
-/*
- * This should only be called upon a pfn swap entry to get the PFN stored
- * in the swap entry. Please refers to is_pfn_swap_entry() for definition
- * of pfn swap entry.
- */
-static inline unsigned long swp_offset_pfn(swp_entry_t entry)
-{
- VM_BUG_ON(!is_pfn_swap_entry(entry));
- return swp_offset(entry) & SWP_PFN_MASK;
-}
-
/*
* Convert the arch-dependent pte representation of a swp_entry_t into an
* arch-independent swp_entry_t.
@@ -169,27 +156,11 @@ static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
return swp_entry(SWP_DEVICE_WRITE, offset);
}
-static inline bool is_device_private_entry(swp_entry_t entry)
-{
- int type = swp_type(entry);
- return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
-}
-
-static inline bool is_writable_device_private_entry(swp_entry_t entry)
-{
- return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
-}
-
static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
return swp_entry(SWP_DEVICE_EXCLUSIVE, offset);
}
-static inline bool is_device_exclusive_entry(swp_entry_t entry)
-{
- return swp_type(entry) == SWP_DEVICE_EXCLUSIVE;
-}
-
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
@@ -201,50 +172,14 @@ static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
return swp_entry(0, 0);
}
-static inline bool is_device_private_entry(swp_entry_t entry)
-{
- return false;
-}
-
-static inline bool is_writable_device_private_entry(swp_entry_t entry)
-{
- return false;
-}
-
static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
-static inline bool is_device_exclusive_entry(swp_entry_t entry)
-{
- return false;
-}
-
#endif /* CONFIG_DEVICE_PRIVATE */
#ifdef CONFIG_MIGRATION
-static inline int is_migration_entry(swp_entry_t entry)
-{
- return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
- swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
- swp_type(entry) == SWP_MIGRATION_WRITE);
-}
-
-static inline int is_writable_migration_entry(swp_entry_t entry)
-{
- return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
-}
-
-static inline int is_readable_migration_entry(swp_entry_t entry)
-{
- return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
-}
-
-static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
-{
- return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
-}
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
@@ -310,23 +245,10 @@ static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
return swp_entry(0, 0);
}
-static inline int is_migration_entry(swp_entry_t swp)
-{
- return 0;
-}
-
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
unsigned long addr, pte_t *pte) { }
-static inline int is_writable_migration_entry(swp_entry_t entry)
-{
- return 0;
-}
-static inline int is_readable_migration_entry(swp_entry_t entry)
-{
- return 0;
-}
static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
@@ -410,47 +332,6 @@ static inline swp_entry_t make_guard_swp_entry(void)
return make_pte_marker_entry(PTE_MARKER_GUARD);
}
-static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
-{
- struct page *p = pfn_to_page(swp_offset_pfn(entry));
-
- /*
- * Any use of migration entries may only occur while the
- * corresponding page is locked
- */
- BUG_ON(is_migration_entry(entry) && !PageLocked(p));
-
- return p;
-}
-
-static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
-{
- struct folio *folio = pfn_folio(swp_offset_pfn(entry));
-
- /*
- * Any use of migration entries may only occur while the
- * corresponding folio is locked
- */
- BUG_ON(is_migration_entry(entry) && !folio_test_locked(folio));
-
- return folio;
-}
-
-/*
- * A pfn swap entry is a special type of swap entry that always has a pfn stored
- * in the swap offset. They can either be used to represent unaddressable device
- * memory, to restrict access to a page undergoing migration or to represent a
- * pfn which has been hwpoisoned and unmapped.
- */
-static inline bool is_pfn_swap_entry(swp_entry_t entry)
-{
- /* Make sure the swp offset can always store the needed fields */
- BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
-
- return is_migration_entry(entry) || is_device_private_entry(entry) ||
- is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
-}
-
struct page_vma_mapped_walk;
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index 608d1011ce03..64db85a80558 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -844,7 +844,7 @@ static void __init pmd_softleaf_tests(struct pgtable_debug_args *args) { }
static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
struct page *page;
- swp_entry_t swp;
+ softleaf_t entry;
if (!IS_ENABLED(CONFIG_MIGRATION))
return;
@@ -867,17 +867,17 @@ static void __init swap_migration_tests(struct pgtable_debug_args *args)
* be locked, otherwise it stumbles upon a BUG_ON().
*/
__SetPageLocked(page);
- swp = make_writable_migration_entry(page_to_pfn(page));
- WARN_ON(!is_migration_entry(swp));
- WARN_ON(!is_writable_migration_entry(swp));
+ entry = make_writable_migration_entry(page_to_pfn(page));
+ WARN_ON(!softleaf_is_migration(entry));
+ WARN_ON(!softleaf_is_migration_write(entry));
- swp = make_readable_migration_entry(swp_offset(swp));
- WARN_ON(!is_migration_entry(swp));
- WARN_ON(is_writable_migration_entry(swp));
+ entry = make_readable_migration_entry(swp_offset(entry));
+ WARN_ON(!softleaf_is_migration(entry));
+ WARN_ON(softleaf_is_migration_write(entry));
- swp = make_readable_migration_entry(page_to_pfn(page));
- WARN_ON(!is_migration_entry(swp));
- WARN_ON(is_writable_migration_entry(swp));
+ entry = make_readable_migration_entry(page_to_pfn(page));
+ WARN_ON(!softleaf_is_migration(entry));
+ WARN_ON(softleaf_is_migration_write(entry));
__ClearPageLocked(page);
}
diff --git a/mm/hmm.c b/mm/hmm.c
index d5c4e60fbfad..f91c38d4507a 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -265,7 +265,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
cpu_flags = HMM_PFN_VALID;
if (softleaf_is_device_private_write(entry))
cpu_flags |= HMM_PFN_WRITE;
- new_pfn_flags = swp_offset_pfn(entry) | cpu_flags;
+ new_pfn_flags = softleaf_to_pfn(entry) | cpu_flags;
goto out;
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b702b161ab35..f7f18a3ea495 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5640,7 +5640,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
} else if (unlikely(softleaf_is_migration(softleaf))) {
bool uffd_wp = pte_swp_uffd_wp(entry);
- if (!is_readable_migration_entry(softleaf) && cow) {
+ if (!softleaf_is_migration_read(softleaf) && cow) {
/*
* COW mappings require pages in both
* parent and child to be set to read.
diff --git a/mm/ksm.c b/mm/ksm.c
index 7cd19a6ce45f..b911df37f04e 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -637,14 +637,14 @@ static int break_ksm_pmd_entry(pmd_t *pmdp, unsigned long addr, unsigned long en
if (pte_present(pte)) {
folio = vm_normal_folio(walk->vma, addr, pte);
} else if (!pte_none(pte)) {
- swp_entry_t entry = pte_to_swp_entry(pte);
+ const softleaf_t entry = softleaf_from_pte(pte);
/*
* As KSM pages remain KSM pages until freed, no need to wait
* here for migration to end.
*/
- if (is_migration_entry(entry))
- folio = pfn_swap_entry_folio(entry);
+ if (softleaf_is_migration(entry))
+ folio = softleaf_to_folio(entry);
}
/* return 1 if the page is an normal ksm page or KSM-placed zero page */
found = (folio && folio_test_ksm(folio)) || is_ksm_zero_pte(pte);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index acc35c881547..6e79da3de221 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -691,10 +691,10 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
if (pte_present(pte)) {
pfn = pte_pfn(pte);
} else {
- swp_entry_t swp = pte_to_swp_entry(pte);
+ const softleaf_t entry = softleaf_from_pte(pte);
- if (is_hwpoison_entry(swp))
- pfn = swp_offset_pfn(swp);
+ if (softleaf_is_hwpoison(entry))
+ pfn = softleaf_to_pfn(entry);
}
if (!pfn || pfn != poisoned_pfn)
diff --git a/mm/memory.c b/mm/memory.c
index ad336cbf1d88..accd275cd651 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -902,7 +902,8 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
static int try_restore_exclusive_pte(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep, pte_t orig_pte)
{
- struct page *page = pfn_swap_entry_to_page(pte_to_swp_entry(orig_pte));
+ const softleaf_t entry = softleaf_from_pte(orig_pte);
+ struct page *page = softleaf_to_page(entry);
struct folio *folio = page_folio(page);
if (folio_trylock(folio)) {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index dee95d5ecfd4..acb9bf89f619 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -705,7 +705,9 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
if (pte_none(ptent))
continue;
if (!pte_present(ptent)) {
- if (is_migration_entry(pte_to_swp_entry(ptent)))
+ const softleaf_t entry = softleaf_from_pte(ptent);
+
+ if (softleaf_is_migration(entry))
qp->nr_failed++;
continue;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 48f98a6c1ad2..182a5b7b2ead 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -483,7 +483,7 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
spinlock_t *ptl;
pte_t *ptep;
pte_t pte;
- swp_entry_t entry;
+ softleaf_t entry;
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!ptep)
@@ -495,8 +495,8 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
if (pte_none(pte) || pte_present(pte))
goto out;
- entry = pte_to_swp_entry(pte);
- if (!is_migration_entry(entry))
+ entry = softleaf_from_pte(pte);
+ if (!softleaf_is_migration(entry))
goto out;
migration_entry_wait_on_locked(entry, ptl);
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 880f26a316f8..c50abbd32f21 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -282,7 +282,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
unsigned long mpfn = 0, pfn;
struct folio *folio;
struct page *page;
- swp_entry_t entry;
+ softleaf_t entry;
pte_t pte;
pte = ptep_get(ptep);
@@ -301,11 +301,11 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
* page table entry. Other special swap entries are not
* migratable, and we ignore regular swapped page.
*/
- entry = pte_to_swp_entry(pte);
- if (!is_device_private_entry(entry))
+ entry = softleaf_from_pte(pte);
+ if (!softleaf_is_device_private(entry))
goto next;
- page = pfn_swap_entry_to_page(entry);
+ page = softleaf_to_page(entry);
pgmap = page_pgmap(page);
if (!(migrate->flags &
MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
@@ -331,7 +331,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
mpfn = migrate_pfn(page_to_pfn(page)) |
MIGRATE_PFN_MIGRATE;
- if (is_writable_device_private_entry(entry))
+ if (softleaf_is_device_private_write(entry))
mpfn |= MIGRATE_PFN_WRITE;
} else {
pfn = pte_pfn(pte);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index ab014ce17f9c..476a29cc89bf 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -317,11 +317,11 @@ static long change_pte_range(struct mmu_gather *tlb,
pages++;
}
} else {
- swp_entry_t entry = pte_to_swp_entry(oldpte);
+ softleaf_t entry = softleaf_from_pte(oldpte);
pte_t newpte;
- if (is_writable_migration_entry(entry)) {
- struct folio *folio = pfn_swap_entry_folio(entry);
+ if (softleaf_is_migration_write(entry)) {
+ const struct folio *folio = softleaf_to_folio(entry);
/*
* A protection check is difficult so
@@ -335,7 +335,7 @@ static long change_pte_range(struct mmu_gather *tlb,
newpte = swp_entry_to_pte(entry);
if (pte_swp_soft_dirty(oldpte))
newpte = pte_swp_mksoft_dirty(newpte);
- } else if (is_writable_device_private_entry(entry)) {
+ } else if (softleaf_is_device_private_write(entry)) {
/*
* We do not preserve soft-dirtiness. See
* copy_nonpresent_pte() for explanation.
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 8137d2366722..b38a1d00c971 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -49,7 +49,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, pmd_t *pmdvalp,
if (is_migration)
return false;
} else if (!is_migration) {
- swp_entry_t entry;
+ softleaf_t entry;
/*
* Handle un-addressable ZONE_DEVICE memory.
@@ -67,9 +67,9 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, pmd_t *pmdvalp,
* For more details on device private memory see HMM
* (include/linux/hmm.h or mm/hmm.c).
*/
- entry = pte_to_swp_entry(ptent);
- if (!is_device_private_entry(entry) &&
- !is_device_exclusive_entry(entry))
+ entry = softleaf_from_pte(ptent);
+ if (!softleaf_is_device_private(entry) &&
+ !softleaf_is_device_exclusive(entry))
return false;
}
spin_lock(*ptlp);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 3067feb970d1..d6e29da60d09 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -1000,11 +1000,10 @@ struct folio *folio_walk_start(struct folio_walk *fw,
goto found;
}
} else if (!pte_none(pte)) {
- swp_entry_t entry = pte_to_swp_entry(pte);
+ const softleaf_t entry = softleaf_from_pte(pte);
- if ((flags & FW_MIGRATION) &&
- is_migration_entry(entry)) {
- page = pfn_swap_entry_to_page(entry);
+ if ((flags & FW_MIGRATION) && softleaf_is_migration(entry)) {
+ page = softleaf_to_page(entry);
expose_page = false;
goto found;
}
diff --git a/mm/rmap.c b/mm/rmap.c
index 775710115a41..345466ad396b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1969,7 +1969,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
if (likely(pte_present(pteval))) {
pfn = pte_pfn(pteval);
} else {
- pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+ pfn = softleaf_to_pfn(pte_to_swp_entry(pteval));
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
}
@@ -2368,7 +2368,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
if (likely(pte_present(pteval))) {
pfn = pte_pfn(pteval);
} else {
- pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+ pfn = softleaf_to_pfn(pte_to_swp_entry(pteval));
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
}
@@ -2453,8 +2453,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
folio_mark_dirty(folio);
writable = pte_write(pteval);
} else {
+ const softleaf_t entry = softleaf_from_pte(pteval);
+
pte_clear(mm, address, pvmw.pte);
- writable = is_writable_device_private_entry(pte_to_swp_entry(pteval));
+
+ writable = softleaf_is_device_private_write(entry);
}
VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) &&
--
2.51.0