Message-ID: <99ac314a609ad7c4cd0c1cf40db7d25d1c5ad65a.1761288179.git.lorenzo.stoakes@oracle.com>
Date: Fri, 24 Oct 2025 08:41:26 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Christian Borntraeger <borntraeger@...ux.ibm.com>,
Janosch Frank <frankja@...ux.ibm.com>,
Claudio Imbrenda <imbrenda@...ux.ibm.com>,
David Hildenbrand <david@...hat.com>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Gerald Schaefer <gerald.schaefer@...ux.ibm.com>,
Heiko Carstens <hca@...ux.ibm.com>, Vasily Gorbik <gor@...ux.ibm.com>,
Sven Schnelle <svens@...ux.ibm.com>, Zi Yan <ziy@...dia.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
"Liam R . Howlett" <Liam.Howlett@...cle.com>,
Nico Pache <npache@...hat.com>, Ryan Roberts <ryan.roberts@....com>,
Dev Jain <dev.jain@....com>, Barry Song <baohua@...nel.org>,
Lance Yang <lance.yang@...ux.dev>,
Kemeng Shi <shikemeng@...weicloud.com>,
Kairui Song <kasong@...cent.com>, Nhat Pham <nphamcs@...il.com>,
Baoquan He <bhe@...hat.com>, Chris Li <chrisl@...nel.org>,
Peter Xu <peterx@...hat.com>, Matthew Wilcox <willy@...radead.org>,
Jason Gunthorpe <jgg@...pe.ca>, Leon Romanovsky <leon@...nel.org>,
Muchun Song <muchun.song@...ux.dev>,
Oscar Salvador <osalvador@...e.de>, Vlastimil Babka <vbabka@...e.cz>,
Mike Rapoport <rppt@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>, Michal Hocko <mhocko@...e.com>,
Jann Horn <jannh@...gle.com>, Matthew Brost <matthew.brost@...el.com>,
Joshua Hahn <joshua.hahnjy@...il.com>, Rakie Kim <rakie.kim@...com>,
Byungchul Park <byungchul@...com>, Gregory Price <gourry@...rry.net>,
Ying Huang <ying.huang@...ux.alibaba.com>,
Alistair Popple <apopple@...dia.com>, Pedro Falcato <pfalcato@...e.de>,
Pasha Tatashin <pasha.tatashin@...een.com>,
Rik van Riel <riel@...riel.com>, Harry Yoo <harry.yoo@...cle.com>,
kvm@...r.kernel.org, linux-s390@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org
Subject: [RFC PATCH 10/12] mm: remove remaining is_swap_pmd() users and is_swap_pmd()
Update copy_huge_pmd() and change_huge_pmd() to use
is_pmd_non_present_folio_entry(), as this checks for the only valid
non-present huge PMD states.
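For reference, a rough sketch of what the new predicate is assumed to
accept (the real helper is introduced earlier in this series; the name
and body below are illustrative only, built from existing swapops.h
helpers):

	/*
	 * Illustrative sketch only - assumes the helper admits just the
	 * valid non-present huge PMD states (migration and device-private
	 * entries), unlike is_swap_pmd(), which accepted any PMD that was
	 * merely !none and !present.
	 */
	static inline bool pmd_non_present_folio_entry_sketch(pmd_t pmd)
	{
		swp_entry_t entry;

		if (pmd_none(pmd) || pmd_present(pmd))
			return false;

		entry = pmd_to_swp_entry(pmd);
		return is_migration_entry(entry) ||
		       is_device_private_entry(entry);
	}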
Also update mm/debug_vm_pgtable.c to explicitly test for a valid
non-present PMD entry (which it did not do before, which was incorrect),
and have it test against is_huge_pmd() and
is_pmd_non_present_folio_entry() rather than is_swap_pmd().
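Put together, a usage sketch mirroring the hunks below (using the
existing make_writable_migration_entry() and swp_entry_to_pmd()
helpers), the updated test builds a genuinely non-present entry and
checks both predicates:

	/* Build a PMD-level migration entry, as the updated test does. */
	swp_entry_t entry = make_writable_migration_entry(~0UL);
	pmd_t pmd = swp_entry_to_pmd(entry);

	/* A valid non-present huge PMD must satisfy both predicates. */
	WARN_ON(!is_huge_pmd(pmd));
	WARN_ON(!is_pmd_non_present_folio_entry(pmd));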
With these changes in place there are no remaining users of
is_swap_pmd(), so remove it.
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
---
include/linux/huge_mm.h | 9 ---------
mm/debug_vm_pgtable.c | 25 +++++++++++++++----------
mm/huge_memory.c | 5 +++--
3 files changed, 18 insertions(+), 21 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 892cb825dfc7..0c3a002dc10f 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -484,11 +484,6 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
-static inline int is_swap_pmd(pmd_t pmd)
-{
- return !pmd_none(pmd) && !pmd_present(pmd);
-}
-
/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
@@ -684,10 +679,6 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
struct vm_area_struct *next)
{
}
-static inline int is_swap_pmd(pmd_t pmd)
-{
- return 0;
-}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index d4b2835569ce..5b8b0024a492 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -74,6 +74,7 @@ struct pgtable_debug_args {
unsigned long fixed_pte_pfn;
swp_entry_t swp_entry;
+ swp_entry_t non_present_swp_entry;
};
static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
@@ -731,7 +732,7 @@ static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}
-static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
+static void __init pmd_non_present_soft_dirty_tests(struct pgtable_debug_args *args)
{
pmd_t pmd;
@@ -743,15 +744,16 @@ static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
return;
pr_debug("Validating PMD swap soft dirty\n");
- pmd = swp_entry_to_pmd(args->swp_entry);
- WARN_ON(!is_swap_pmd(pmd));
+ pmd = swp_entry_to_pmd(args->non_present_swp_entry);
+ WARN_ON(!is_huge_pmd(pmd));
+ WARN_ON(!is_pmd_non_present_folio_entry(pmd));
WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
-static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
+static void __init pmd_non_present_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
@@ -796,7 +798,7 @@ static void __init pte_swap_tests(struct pgtable_debug_args *args)
}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
-static void __init pmd_swap_tests(struct pgtable_debug_args *args)
+static void __init pmd_non_present_tests(struct pgtable_debug_args *args)
{
swp_entry_t arch_entry;
pmd_t pmd1, pmd2;
@@ -805,15 +807,16 @@ static void __init pmd_swap_tests(struct pgtable_debug_args *args)
return;
pr_debug("Validating PMD swap\n");
- pmd1 = swp_entry_to_pmd(args->swp_entry);
- WARN_ON(!is_swap_pmd(pmd1));
+ pmd1 = swp_entry_to_pmd(args->non_present_swp_entry);
+ WARN_ON(!is_huge_pmd(pmd1));
+ WARN_ON(!is_pmd_non_present_folio_entry(pmd1));
arch_entry = __pmd_to_swp_entry(pmd1);
pmd2 = __swp_entry_to_pmd(arch_entry);
WARN_ON(memcmp(&pmd1, &pmd2, sizeof(pmd1)));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
-static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
+static void __init pmd_non_present_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init swap_migration_tests(struct pgtable_debug_args *args)
@@ -1207,6 +1210,8 @@ static int __init init_args(struct pgtable_debug_args *args)
max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));
/* Create a swp entry with all possible bits set while still being swap. */
args->swp_entry = swp_entry(MAX_SWAPFILES - 1, max_swap_offset);
+ /* Create a non-present migration entry. */
+ args->non_present_swp_entry = make_writable_migration_entry(~0UL);
/*
* Allocate (huge) pages because some of the tests need to access
@@ -1296,12 +1301,12 @@ static int __init debug_vm_pgtable(void)
pte_soft_dirty_tests(&args);
pmd_soft_dirty_tests(&args);
pte_swap_soft_dirty_tests(&args);
- pmd_swap_soft_dirty_tests(&args);
+ pmd_non_present_soft_dirty_tests(&args);
pte_swap_exclusive_tests(&args);
pte_swap_tests(&args);
- pmd_swap_tests(&args);
+ pmd_non_present_tests(&args);
swap_migration_tests(&args);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index fa928ca42b6d..a16da67684b4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1874,7 +1874,8 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
ret = -EAGAIN;
pmd = *src_pmd;
- if (unlikely(thp_migration_supported() && is_swap_pmd(pmd))) {
+ if (unlikely(thp_migration_supported() &&
+ is_pmd_non_present_folio_entry(pmd))) {
copy_huge_non_present_pmd(dst_mm, src_mm, dst_pmd, src_pmd, addr,
dst_vma, src_vma, pmd, pgtable);
ret = 0;
@@ -2562,7 +2563,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (!ptl)
return 0;
- if (thp_migration_supported() && is_swap_pmd(*pmd)) {
+ if (thp_migration_supported() && is_pmd_non_present_folio_entry(*pmd)) {
change_non_present_huge_pmd(mm, addr, pmd, uffd_wp,
uffd_wp_resolve);
goto unlock;
--
2.51.0