Message-ID: <20240704043132.28501-24-osalvador@suse.de>
Date: Thu, 4 Jul 2024 06:31:10 +0200
From: Oscar Salvador <osalvador@...e.de>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
Peter Xu <peterx@...hat.com>,
Muchun Song <muchun.song@...ux.dev>,
David Hildenbrand <david@...hat.com>,
SeongJae Park <sj@...nel.org>,
Miaohe Lin <linmiaohe@...wei.com>,
Michal Hocko <mhocko@...e.com>,
Matthew Wilcox <willy@...radead.org>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Oscar Salvador <osalvador@...e.de>
Subject: [PATCH 23/45] mm/mempolicy: Create queue_folios_pud to handle PUD-mapped hugetlb vmas

Normal THPs cannot be PUD-mapped (devmap aside), but hugetlb pages can be, so
create queue_folios_pud() to handle PUD-mapped hugetlb VMAs.

Also implement is_pud_migration_entry() and pud_folio(), as both are used in
this patch.

Signed-off-by: Oscar Salvador <osalvador@...e.de>
---
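
Note for reviewers: queue_folios_pud() relies on the pud_huge_lock() helper
introduced earlier in this series behaving like its PMD counterpart. A minimal
sketch of the expected contract (pud_huge_lock_sketch is an illustrative name
for this note, not the actual helper from the series):

/*
 * Take the PUD lock and return it held when *pud is a huge entry
 * (leaf or migration), otherwise drop the lock and return NULL so
 * the walk descends to the PMD level.
 */
static inline spinlock_t *pud_huge_lock_sketch(pud_t *pud,
					       struct vm_area_struct *vma)
{
	spinlock_t *ptl = pud_lock(vma->vm_mm, pud);

	if (pud_leaf(*pud) || is_pud_migration_entry(*pud))
		return ptl;	/* caller must spin_unlock(ptl) */

	spin_unlock(ptl);
	return NULL;
}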
 include/linux/pgtable.h |  1 +
 include/linux/swapops.h | 12 ++++++++++++
 mm/mempolicy.c          | 32 ++++++++++++++++++++++++++++++++
 3 files changed, 45 insertions(+)
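
With the series applied, the new path can be exercised from userspace by
calling mbind() on a 1 GiB hugetlb mapping, which is PUD-mapped on x86_64.
A rough test program (assumes a populated 1 GiB pool, e.g. booting with
"hugepagesz=1G hugepages=1", and libnuma headers; build with -lnuma):

#define _GNU_SOURCE
#include <numaif.h>	/* mbind(), MPOL_* */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB	(30 << 26)	/* 30 << MAP_HUGE_SHIFT */
#endif

int main(void)
{
	size_t sz = 1UL << 30;
	unsigned long nodemask = 1UL << 0;	/* bind to node 0 */

	/* One 1 GiB hugetlb page -> a single PUD-mapped folio. */
	char *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_1GB,
		       -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, sz);	/* fault the page in before rebinding */

	/* MPOL_MF_MOVE makes the walk queue the folio for migration. */
	if (mbind(p, sz, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
		  MPOL_MF_MOVE)) {
		perror("mbind");
		return 1;
	}
	return 0;
}
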
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 458e3cbc96b2..23d51fec81ac 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -51,6 +51,7 @@
 #endif
 
 #define pmd_folio(pmd) page_folio(pmd_page(pmd))
+#define pud_folio(pud) page_folio(pud_page(pud))
 
 /*
  * A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD]
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 182957f0d013..a23900961d11 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -542,6 +542,18 @@ static inline bool is_pfn_swap_entry(swp_entry_t entry)
 
 struct page_vma_mapped_walk;
 
+#ifdef CONFIG_HUGETLB_PAGE
+static inline int is_pud_migration_entry(pud_t pud)
+{
+	return is_swap_pud(pud) && is_migration_entry(pud_to_swp_entry(pud));
+}
+#else
+static inline int is_pud_migration_entry(pud_t pud)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 		struct page *page);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 5baf29da198c..93b14090d484 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -501,6 +501,37 @@ static inline bool queue_folio_required(struct folio *folio,
 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 }
 
+static int queue_folios_pud(pud_t *pud, unsigned long addr, unsigned long end,
+			    struct mm_walk *walk)
+{
+	spinlock_t *ptl;
+	struct folio *folio;
+	struct vm_area_struct *vma = walk->vma;
+	struct queue_pages *qp = walk->private;
+
+	ptl = pud_huge_lock(pud, vma);
+	if (!ptl)
+		return 0;
+
+	if (unlikely(is_pud_migration_entry(*pud))) {
+		qp->nr_failed++;
+		goto out;
+	}
+	folio = pud_folio(*pud);
+	if (!queue_folio_required(folio, qp))
+		goto out;
+	if (!(qp->flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
+	    !vma_migratable(walk->vma) ||
+	    !migrate_folio_add(folio, qp->pagelist, qp->flags, walk->vma, false))
+		qp->nr_failed++;
+
+out:
+	spin_unlock(ptl);
+	if (qp->nr_failed && strictly_unmovable(qp->flags))
+		return -EIO;
+	return 0;
+}
+
 static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
 {
 	struct folio *folio;
@@ -730,6 +761,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 
 static const struct mm_walk_ops queue_pages_walk_ops = {
 	.hugetlb_entry		= queue_folios_hugetlb,
+	.pud_entry		= queue_folios_pud,
 	.pmd_entry		= queue_folios_pte_range,
 	.test_walk		= queue_pages_test_walk,
 	.walk_lock		= PGWALK_RDLOCK,
--
2.26.2