[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <a48bf5fadd66059bbacca6a5f2eccc60eee3278e.1761288179.git.lorenzo.stoakes@oracle.com>
Date: Fri, 24 Oct 2025 08:41:19 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Christian Borntraeger <borntraeger@...ux.ibm.com>,
Janosch Frank <frankja@...ux.ibm.com>,
Claudio Imbrenda <imbrenda@...ux.ibm.com>,
David Hildenbrand <david@...hat.com>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Gerald Schaefer <gerald.schaefer@...ux.ibm.com>,
Heiko Carstens <hca@...ux.ibm.com>, Vasily Gorbik <gor@...ux.ibm.com>,
Sven Schnelle <svens@...ux.ibm.com>, Zi Yan <ziy@...dia.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
"Liam R . Howlett" <Liam.Howlett@...cle.com>,
Nico Pache <npache@...hat.com>, Ryan Roberts <ryan.roberts@....com>,
Dev Jain <dev.jain@....com>, Barry Song <baohua@...nel.org>,
Lance Yang <lance.yang@...ux.dev>,
Kemeng Shi <shikemeng@...weicloud.com>,
Kairui Song <kasong@...cent.com>, Nhat Pham <nphamcs@...il.com>,
Baoquan He <bhe@...hat.com>, Chris Li <chrisl@...nel.org>,
Peter Xu <peterx@...hat.com>, Matthew Wilcox <willy@...radead.org>,
Jason Gunthorpe <jgg@...pe.ca>, Leon Romanovsky <leon@...nel.org>,
Muchun Song <muchun.song@...ux.dev>,
Oscar Salvador <osalvador@...e.de>, Vlastimil Babka <vbabka@...e.cz>,
Mike Rapoport <rppt@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>, Michal Hocko <mhocko@...e.com>,
Jann Horn <jannh@...gle.com>, Matthew Brost <matthew.brost@...el.com>,
Joshua Hahn <joshua.hahnjy@...il.com>, Rakie Kim <rakie.kim@...com>,
Byungchul Park <byungchul@...com>, Gregory Price <gourry@...rry.net>,
Ying Huang <ying.huang@...ux.alibaba.com>,
Alistair Popple <apopple@...dia.com>, Pedro Falcato <pfalcato@...e.de>,
Pasha Tatashin <pasha.tatashin@...een.com>,
Rik van Riel <riel@...riel.com>, Harry Yoo <harry.yoo@...cle.com>,
kvm@...r.kernel.org, linux-s390@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org
Subject: [RFC PATCH 03/12] mm: introduce get_pte_swap_entry() and use it
We have a number of checks which explicitly test for 'true' swap entries —
that is, swap entries which are not non-swap entries.
This is a confusing state of affairs, and we're duplicating checks as well
as using is_swap_pte() which is applied inconsistently throughout the code
base.
Avoid all this by introducing a new function, get_pte_swap_entry(), which
explicitly checks for a true swap entry and returns it if the PTE contains
one.
We then update the code base to use this function.
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
---
include/linux/swapops.h | 29 +++++++++++++++++++++++++++++
mm/internal.h | 6 +++---
mm/madvise.c | 5 +----
mm/swap_state.c | 5 +----
mm/swapfile.c | 3 +--
5 files changed, 35 insertions(+), 13 deletions(-)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 24eaf8825c6b..a557b0e7f05c 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -649,5 +649,34 @@ static inline int is_pmd_non_present_folio_entry(pmd_t pmd)
return is_pmd_migration_entry(pmd) || is_pmd_device_private_entry(pmd);
}
+/**
+ * get_pte_swap_entry() - Gets PTE swap entry if one is present.
+ * @pte: The PTE we are checking.
+ * @entryp: Output pointer to a swap entry that will be populated upon
+ * success.
+ *
+ * Determines if the PTE describes an entry in swap or swap cache (i.e. is a
+ * swap entry and not a non-swap entry), if so it sets @entryp to the swap
+ * entry.
+ *
+ * This should only be used if we do not have any prior knowledge of this
+ * PTE's state.
+ *
+ * Return: true if swappable, false otherwise.
+ */
+static inline bool get_pte_swap_entry(pte_t pte, swp_entry_t *entryp)
+{
+ if (pte_present(pte))
+ return false;
+ if (pte_none(pte))
+ return false;
+
+ *entryp = pte_to_swp_entry(pte);
+ if (non_swap_entry(*entryp))
+ return false;
+
+ return true;
+}
+
#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */
diff --git a/mm/internal.h b/mm/internal.h
index b855a4412878..78dcf6048672 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -378,15 +378,15 @@ static inline pte_t pte_next_swp_offset(pte_t pte)
*/
static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
+ swp_entry_t entry;
+ const bool __maybe_unused is_swap = get_pte_swap_entry(pte, &entry);
pte_t expected_pte = pte_next_swp_offset(pte);
const pte_t *end_ptep = start_ptep + max_nr;
- swp_entry_t entry = pte_to_swp_entry(pte);
pte_t *ptep = start_ptep + 1;
unsigned short cgroup_id;
VM_WARN_ON(max_nr < 1);
- VM_WARN_ON(!is_swap_pte(pte));
- VM_WARN_ON(non_swap_entry(entry));
+ VM_WARN_ON(!is_swap);
cgroup_id = lookup_swap_cgroup_id(entry);
while (ptep < end_ptep) {
diff --git a/mm/madvise.c b/mm/madvise.c
index f9f80b2e9d43..578036ef6675 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -205,10 +205,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
}
pte = ptep_get(ptep);
- if (!is_swap_pte(pte))
- continue;
- entry = pte_to_swp_entry(pte);
- if (unlikely(non_swap_entry(entry)))
+ if (!get_pte_swap_entry(pte, &entry))
continue;
pte_unmap_unlock(ptep, ptl);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b13e9c4baa90..9199b64206ff 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -754,10 +754,7 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
break;
}
pentry = ptep_get_lockless(pte);
- if (!is_swap_pte(pentry))
- continue;
- entry = pte_to_swp_entry(pentry);
- if (unlikely(non_swap_entry(entry)))
+ if (!get_pte_swap_entry(pentry, &entry))
continue;
pte_unmap(pte);
pte = NULL;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index cb2392ed8e0e..74eb9221a220 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2253,10 +2253,9 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
ptent = ptep_get_lockless(pte);
- if (!is_swap_pte(ptent))
+ if (!get_pte_swap_entry(ptent, &entry))
continue;
- entry = pte_to_swp_entry(ptent);
if (swp_type(entry) != type)
continue;
--
2.51.0
Powered by blists - more mailing lists