Message-ID: <20240704043132.28501-13-osalvador@suse.de>
Date: Thu, 4 Jul 2024 06:30:59 +0200
From: Oscar Salvador <osalvador@...e.de>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
Peter Xu <peterx@...hat.com>,
Muchun Song <muchun.song@...ux.dev>,
David Hildenbrand <david@...hat.com>,
SeongJae Park <sj@...nel.org>,
Miaohe Lin <linmiaohe@...wei.com>,
Michal Hocko <mhocko@...e.com>,
Matthew Wilcox <willy@...radead.org>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Oscar Salvador <osalvador@...e.de>
Subject: [PATCH 12/45] fs/proc: Enable pagemap_pmd_range to handle hugetlb vmas
PMD-mapped hugetlb vmas will also reach pagemap_pmd_range().
Add the required code so it knows how to handle them there.
Signed-off-by: Oscar Salvador <osalvador@...e.de>
---
fs/proc/task_mmu.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
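For reference, a rough sketch of what pmd_huge_lock() (introduced
earlier in this series) is expected to do: take the PMD lock and hand
it back only when the PMD is a huge leaf, whether that is a THP, a
devmap PMD, a PMD-level swap entry or a PMD-mapped hugetlb page. The
exact predicate below is an assumption for illustration, not the
helper's actual body:

static inline spinlock_t *pmd_huge_lock(pmd_t *pmdp,
					struct vm_area_struct *vma)
{
	spinlock_t *ptl = pmd_lock(vma->vm_mm, pmdp);

	/* Huge leaf: present huge PMD (THP or hugetlb) or swap PMD. */
	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp) ||
	    is_swap_pmd(*pmdp) ||
	    (is_vm_hugetlb_page(vma) && pmd_leaf(*pmdp)))
		return ptl;

	spin_unlock(ptl);
	return NULL;
}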
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 4d94b6ce58dd..ec429d82b921 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1824,9 +1824,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
spinlock_t *ptl;
pte_t *pte, *orig_pte;
int err = 0;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
- ptl = pmd_trans_huge_lock(pmdp, vma);
+ ptl = pmd_huge_lock(pmdp, vma);
if (ptl) {
unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
u64 flags = 0, frame = 0;
@@ -1848,7 +1848,6 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
if (pm->show_pfn)
frame = pmd_pfn(pmd) + idx;
}
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
else if (is_swap_pmd(pmd)) {
swp_entry_t entry = pmd_to_swp_entry(pmd);
unsigned long offset;
@@ -1861,7 +1860,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
frame = swp_type(entry) |
(offset << MAX_SWAPFILES_SHIFT);
}
- flags |= PM_SWAP;
+ if (!is_vm_hugetlb_page(vma))
+ flags |= PM_SWAP;
if (pmd_swp_soft_dirty(pmd))
flags |= PM_SOFT_DIRTY;
if (pmd_swp_uffd_wp(pmd))
@@ -1869,7 +1869,6 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
VM_BUG_ON(!is_pmd_migration_entry(pmd));
page = pfn_swap_entry_to_page(entry);
}
-#endif
if (page) {
folio = page_folio(page);
@@ -1899,7 +1898,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
spin_unlock(ptl);
return err;
}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
/*
* We can assume that @vma always points to a valid one and @end never
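A note on the PM_SWAP hunk above: hugetlb pages are never swapped out,
so a non-present hugetlb PMD here can only be e.g. a migration entry,
which pagemap_hugetlb_range() did not report as swapped either; the
is_vm_hugetlb_page() check keeps that behaviour. From userspace this is
visible through bit 62 of a /proc/pid/pagemap entry (see
Documentation/admin-guide/mm/pagemap.rst). A small self-contained
example that reads the entry for one of its own addresses:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Read the 64-bit pagemap entry covering @addr in our own mm. */
static int pagemap_entry(const void *addr, uint64_t *ent)
{
	long psize = sysconf(_SC_PAGESIZE);
	off_t off = (uintptr_t)addr / psize * sizeof(uint64_t);
	int fd = open("/proc/self/pagemap", O_RDONLY);
	int ret = -1;

	if (fd < 0)
		return -1;
	if (pread(fd, ent, sizeof(*ent), off) == sizeof(*ent))
		ret = 0;
	close(fd);
	return ret;
}

int main(void)
{
	uint64_t ent;
	int probe = 0;	/* stack page, guaranteed to be mapped */

	if (pagemap_entry(&probe, &ent))
		return 1;
	/* Bit 63: page present, bit 62: page swapped. */
	printf("present=%d swapped=%d\n",
	       (int)((ent >> 63) & 1), (int)((ent >> 62) & 1));
	return 0;
}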
--
2.26.2