Message-ID: <20250730092139.3890844-3-balbirs@nvidia.com>
Date: Wed, 30 Jul 2025 19:21:30 +1000
From: Balbir Singh <balbirs@...dia.com>
To: linux-mm@...ck.org
Cc: linux-kernel@...r.kernel.org,
Balbir Singh <balbirs@...dia.com>,
Karol Herbst <kherbst@...hat.com>,
Lyude Paul <lyude@...hat.com>,
Danilo Krummrich <dakr@...nel.org>,
David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>,
Jérôme Glisse <jglisse@...hat.com>,
Shuah Khan <shuah@...nel.org>,
David Hildenbrand <david@...hat.com>,
Barry Song <baohua@...nel.org>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Ryan Roberts <ryan.roberts@....com>,
Matthew Wilcox <willy@...radead.org>,
Peter Xu <peterx@...hat.com>,
Zi Yan <ziy@...dia.com>,
Kefeng Wang <wangkefeng.wang@...wei.com>,
Jane Chu <jane.chu@...cle.com>,
Alistair Popple <apopple@...dia.com>,
Donet Tom <donettom@...ux.ibm.com>,
Mika Penttilä <mpenttil@...hat.com>,
Matthew Brost <matthew.brost@...el.com>,
Francois Dugast <francois.dugast@...el.com>,
Ralph Campbell <rcampbell@...dia.com>
Subject: [v2 02/11] mm/thp: zone_device awareness in THP handling code
Make the THP handling code in the mm subsystem aware of zone device
pages. Although the splitting code is written to be generic, it
currently only handles THP sizes corresponding to HPAGE_PMD_NR.
Modify page_vma_mapped_walk() to return true when a zone device huge
entry is present, so that try_to_migrate() and other migration paths
can process the entry appropriately. page_vma_mapped_walk() returns
true for zone device private large folios only when
PVMW_THP_DEVICE_PRIVATE is passed, so that callers which do not deal
with zone device private pages need not gain awareness of them. The
key callback that needs this flag is try_to_migrate_one(). The other
callbacks (page idle tracking, DAMON) use the walk to set the
young/dirty bits, which is not significant for pmd level bit
harvesting.
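
A minimal caller-side sketch (it mirrors the try_to_migrate_one() hunk
further below; the rest of the body is elided):

	/* Only try_to_migrate_one() opts in; other walkers pass 0. */
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address,
			      PVMW_THP_DEVICE_PRIVATE);

	while (page_vma_mapped_walk(&pvmw)) {
		if (!pvmw.pte) {
			/*
			 * PMD-mapped entry. With PVMW_THP_DEVICE_PRIVATE
			 * the walk also stops here, pmd lock held, for a
			 * non-present device private pmd, so the caller
			 * can split it or install migration entries.
			 */
		}
		/* pte-mapped cases are handled as before */
	}
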
pmd_pfn() does not work well with zone device entries; use
pfn_pmd_entry_to_swap() instead for checking and comparing zone
device entries.
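
Concretely, the pfn comes from the swap entry for device private pmds
(this is the try_to_migrate_one() hunk below):

	if (is_pmd_device_private_entry(*pvmw.pmd))
		pfn = swp_offset_pfn(pmd_to_swp_entry(*pvmw.pmd));
	else
		pfn = pmd_pfn(*pvmw.pmd);

	subpage = folio_page(folio, pfn - folio_pfn(folio));
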
Zone device private entries that are split via munmap go through a
pmd split, but also need a folio split. A deferred split does not
work if a fault is encountered afterwards, because fault handling
involves migration entries (via folio_migrate_mapping()) and the
folio sizes are expected to match there. This introduces the need to
split the folio while handling the pmd split. Because the folio is
still mapped, calling folio_split() would cause lock recursion, so
the __split_unmapped_folio() code is used via a new wrapper,
split_device_private_folio(), which skips the checks around
folio->mapping and the swapcache and avoids the need to unmap and
remap the folio.
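
As a rough sketch, the call flow for a partially unmapped device
private THP then looks like this (helpers as introduced below):

	__split_huge_pmd_locked()              /* freeze == false */
	    split_device_private_folio(folio)  /* folio locked, ref held */
	        folio_ref_freeze(folio, ...)
	        __split_unmapped_folio(folio, 0, ...)
	        folio_ref_unfreeze() on the resulting order-0 folios
	    folio_add_anon_rmap_ptes() on each new folio
	    /* pte level device private swap entries are installed later
	       in __split_huge_pmd_locked() */
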
Cc: Karol Herbst <kherbst@...hat.com>
Cc: Lyude Paul <lyude@...hat.com>
Cc: Danilo Krummrich <dakr@...nel.org>
Cc: David Airlie <airlied@...il.com>
Cc: Simona Vetter <simona@...ll.ch>
Cc: "Jérôme Glisse" <jglisse@...hat.com>
Cc: Shuah Khan <shuah@...nel.org>
Cc: David Hildenbrand <david@...hat.com>
Cc: Barry Song <baohua@...nel.org>
Cc: Baolin Wang <baolin.wang@...ux.alibaba.com>
Cc: Ryan Roberts <ryan.roberts@....com>
Cc: Matthew Wilcox <willy@...radead.org>
Cc: Peter Xu <peterx@...hat.com>
Cc: Zi Yan <ziy@...dia.com>
Cc: Kefeng Wang <wangkefeng.wang@...wei.com>
Cc: Jane Chu <jane.chu@...cle.com>
Cc: Alistair Popple <apopple@...dia.com>
Cc: Donet Tom <donettom@...ux.ibm.com>
Cc: Mika Penttilä <mpenttil@...hat.com>
Cc: Matthew Brost <matthew.brost@...el.com>
Cc: Francois Dugast <francois.dugast@...el.com>
Cc: Ralph Campbell <rcampbell@...dia.com>
Signed-off-by: Matthew Brost <matthew.brost@...el.com>
Signed-off-by: Balbir Singh <balbirs@...dia.com>
---
include/linux/huge_mm.h | 1 +
include/linux/rmap.h | 2 +
include/linux/swapops.h | 17 +++
mm/huge_memory.c | 268 +++++++++++++++++++++++++++++++++-------
mm/page_vma_mapped.c | 13 +-
mm/pgtable-generic.c | 6 +
mm/rmap.c | 22 +++-
7 files changed, 278 insertions(+), 51 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 7748489fde1b..2a6f5ff7bca3 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -345,6 +345,7 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
+int split_device_private_folio(struct folio *folio);
int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
bool uniform_split_supported(struct folio *folio, unsigned int new_order,
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 20803fcb49a7..625f36dcc121 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -905,6 +905,8 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr,
#define PVMW_SYNC (1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION (1 << 1)
+/* Look for device private THP entries */
+#define PVMW_THP_DEVICE_PRIVATE (1 << 2)
struct page_vma_mapped_walk {
unsigned long pfn;
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 64ea151a7ae3..2641c01bd5d2 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -563,6 +563,7 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
{
return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
+
#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page)
@@ -594,6 +595,22 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_ENABLE_THP_MIGRATION)
+
+static inline int is_pmd_device_private_entry(pmd_t pmd)
+{
+ return is_swap_pmd(pmd) && is_device_private_entry(pmd_to_swp_entry(pmd));
+}
+
+#else /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+static inline int is_pmd_device_private_entry(pmd_t pmd)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
static inline int non_swap_entry(swp_entry_t entry)
{
return swp_type(entry) >= MAX_SWAPFILES;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9c38a95e9f09..e373c6578894 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -72,6 +72,10 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
struct shrink_control *sc);
static unsigned long deferred_split_scan(struct shrinker *shrink,
struct shrink_control *sc);
+static int __split_unmapped_folio(struct folio *folio, int new_order,
+ struct page *split_at, struct xa_state *xas,
+ struct address_space *mapping, bool uniform_split);
+
static bool split_underused_thp = true;
static atomic_t huge_zero_refcount;
@@ -1711,8 +1715,11 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (unlikely(is_swap_pmd(pmd))) {
swp_entry_t entry = pmd_to_swp_entry(pmd);
- VM_BUG_ON(!is_pmd_migration_entry(pmd));
- if (!is_readable_migration_entry(entry)) {
+ VM_WARN_ON(!is_pmd_migration_entry(pmd) &&
+ !is_pmd_device_private_entry(pmd));
+
+ if (is_migration_entry(entry) &&
+ is_writable_migration_entry(entry)) {
entry = make_readable_migration_entry(
swp_offset(entry));
pmd = swp_entry_to_pmd(entry);
@@ -1722,6 +1729,32 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd = pmd_swp_mkuffd_wp(pmd);
set_pmd_at(src_mm, addr, src_pmd, pmd);
}
+
+ if (is_device_private_entry(entry)) {
+ if (is_writable_device_private_entry(entry)) {
+ entry = make_readable_device_private_entry(
+ swp_offset(entry));
+ pmd = swp_entry_to_pmd(entry);
+
+ if (pmd_swp_soft_dirty(*src_pmd))
+ pmd = pmd_swp_mksoft_dirty(pmd);
+ if (pmd_swp_uffd_wp(*src_pmd))
+ pmd = pmd_swp_mkuffd_wp(pmd);
+ set_pmd_at(src_mm, addr, src_pmd, pmd);
+ }
+
+ src_folio = pfn_swap_entry_folio(entry);
+ VM_WARN_ON(!folio_test_large(src_folio));
+
+ folio_get(src_folio);
+ /*
+ * folio_try_dup_anon_rmap_pmd does not fail for
+ * device private entries.
+ */
+ VM_WARN_ON(folio_try_dup_anon_rmap_pmd(src_folio,
+ &src_folio->page, dst_vma, src_vma));
+ }
+
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
mm_inc_nr_ptes(dst_mm);
pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
@@ -2219,15 +2252,22 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
folio_remove_rmap_pmd(folio, page, vma);
WARN_ON_ONCE(folio_mapcount(folio) < 0);
VM_BUG_ON_PAGE(!PageHead(page), page);
- } else if (thp_migration_supported()) {
+ } else if (is_pmd_migration_entry(orig_pmd) ||
+ is_pmd_device_private_entry(orig_pmd)) {
swp_entry_t entry;
- VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
entry = pmd_to_swp_entry(orig_pmd);
folio = pfn_swap_entry_folio(entry);
flush_needed = 0;
- } else
- WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
+
+ if (!thp_migration_supported())
+ WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
+
+ if (is_pmd_device_private_entry(orig_pmd)) {
+ folio_remove_rmap_pmd(folio, &folio->page, vma);
+ WARN_ON_ONCE(folio_mapcount(folio) < 0);
+ }
+ }
if (folio_test_anon(folio)) {
zap_deposited_table(tlb->mm, pmd);
@@ -2247,6 +2287,15 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
folio_mark_accessed(folio);
}
+ /*
+ * Do a folio put on zone device private pages after
+ * changes to mm_counter, because the folio_put() will
+ * clean folio->mapping and the folio_test_anon() check
+ * will not be usable.
+ */
+ if (folio_is_device_private(folio))
+ folio_put(folio);
+
spin_unlock(ptl);
if (flush_needed)
tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
@@ -2375,7 +2424,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
struct folio *folio = pfn_swap_entry_folio(entry);
pmd_t newpmd;
- VM_BUG_ON(!is_pmd_migration_entry(*pmd));
+ VM_WARN_ON(!is_pmd_migration_entry(*pmd) &&
+ !folio_is_device_private(folio));
if (is_writable_migration_entry(entry)) {
/*
* A protection check is difficult so
@@ -2388,6 +2438,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
newpmd = swp_entry_to_pmd(entry);
if (pmd_swp_soft_dirty(*pmd))
newpmd = pmd_swp_mksoft_dirty(newpmd);
+ } else if (is_writable_device_private_entry(entry)) {
+ entry = make_readable_device_private_entry(
+ swp_offset(entry));
+ newpmd = swp_entry_to_pmd(entry);
} else {
newpmd = *pmd;
}
@@ -2834,6 +2888,44 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
pmd_populate(mm, pmd, pgtable);
}
+/**
+ * split_device_private_folio - split a huge device private folio into
+ * smaller pages (of order 0), currently used by migrate_device logic to
+ * split folios for pages that are partially mapped
+ *
+ * @folio: the folio to split
+ *
+ * The caller has to hold the folio_lock and a reference via folio_get
+ */
+int split_device_private_folio(struct folio *folio)
+{
+ struct folio *end_folio = folio_next(folio);
+ struct folio *new_folio;
+ int ret = 0;
+
+ /*
+ * Split the folio now. In the case of device
+ * private pages, this path is executed when
+ * the pmd is split and since freeze is not true
+ * it is likely the folio will be deferred_split.
+ *
+ * With device private pages, deferred splits of
+ * folios should be handled here to prevent partial
+ * unmaps from causing issues later on in migration
+ * and fault handling flows.
+ */
+ folio_ref_freeze(folio, 1 + folio_expected_ref_count(folio));
+ ret = __split_unmapped_folio(folio, 0, &folio->page, NULL, NULL, true);
+ VM_WARN_ON(ret);
+ for (new_folio = folio_next(folio); new_folio != end_folio;
+ new_folio = folio_next(new_folio)) {
+ folio_ref_unfreeze(new_folio, 1 + folio_expected_ref_count(
+ new_folio));
+ }
+ folio_ref_unfreeze(folio, 1 + folio_expected_ref_count(folio));
+ return ret;
+}
+
static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long haddr, bool freeze)
{
@@ -2842,16 +2934,19 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
struct page *page;
pgtable_t pgtable;
pmd_t old_pmd, _pmd;
- bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
- bool anon_exclusive = false, dirty = false;
+ bool young, write, soft_dirty, uffd_wp = false;
+ bool anon_exclusive = false, dirty = false, present = false;
unsigned long addr;
pte_t *pte;
int i;
+ swp_entry_t swp_entry;
VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
- VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd));
+
+ VM_WARN_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
+ && !(is_pmd_device_private_entry(*pmd)));
count_vm_event(THP_SPLIT_PMD);
@@ -2899,18 +2994,60 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
return __split_huge_zero_page_pmd(vma, haddr, pmd);
}
- pmd_migration = is_pmd_migration_entry(*pmd);
- if (unlikely(pmd_migration)) {
- swp_entry_t entry;
+ present = pmd_present(*pmd);
+ if (unlikely(!present)) {
+ swp_entry = pmd_to_swp_entry(*pmd);
old_pmd = *pmd;
- entry = pmd_to_swp_entry(old_pmd);
- page = pfn_swap_entry_to_page(entry);
- write = is_writable_migration_entry(entry);
- if (PageAnon(page))
- anon_exclusive = is_readable_exclusive_migration_entry(entry);
- young = is_migration_entry_young(entry);
- dirty = is_migration_entry_dirty(entry);
+
+ folio = pfn_swap_entry_folio(swp_entry);
+ VM_WARN_ON(!is_migration_entry(swp_entry) &&
+ !is_device_private_entry(swp_entry));
+ page = pfn_swap_entry_to_page(swp_entry);
+
+ if (is_pmd_migration_entry(old_pmd)) {
+ write = is_writable_migration_entry(swp_entry);
+ if (PageAnon(page))
+ anon_exclusive =
+ is_readable_exclusive_migration_entry(
+ swp_entry);
+ young = is_migration_entry_young(swp_entry);
+ dirty = is_migration_entry_dirty(swp_entry);
+ } else if (is_pmd_device_private_entry(old_pmd)) {
+ write = is_writable_device_private_entry(swp_entry);
+ anon_exclusive = PageAnonExclusive(page);
+ if (freeze && anon_exclusive &&
+ folio_try_share_anon_rmap_pmd(folio, page))
+ freeze = false;
+ if (!freeze) {
+ rmap_t rmap_flags = RMAP_NONE;
+ unsigned long addr = haddr;
+ struct folio *new_folio;
+ struct folio *end_folio = folio_next(folio);
+
+ if (anon_exclusive)
+ rmap_flags |= RMAP_EXCLUSIVE;
+
+ folio_lock(folio);
+ folio_get(folio);
+
+ split_device_private_folio(folio);
+
+ for (new_folio = folio_next(folio);
+ new_folio != end_folio;
+ new_folio = folio_next(new_folio)) {
+ addr += PAGE_SIZE;
+ folio_unlock(new_folio);
+ folio_add_anon_rmap_ptes(new_folio,
+ &new_folio->page, 1,
+ vma, addr, rmap_flags);
+ }
+ folio_unlock(folio);
+ folio_add_anon_rmap_ptes(folio, &folio->page,
+ 1, vma, haddr, rmap_flags);
+ }
+ }
+
soft_dirty = pmd_swp_soft_dirty(old_pmd);
uffd_wp = pmd_swp_uffd_wp(old_pmd);
} else {
@@ -2996,30 +3133,49 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
* Note that NUMA hinting access restrictions are not transferred to
* avoid any possibility of altering permissions across VMAs.
*/
- if (freeze || pmd_migration) {
+ if (freeze || !present) {
for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
pte_t entry;
- swp_entry_t swp_entry;
-
- if (write)
- swp_entry = make_writable_migration_entry(
- page_to_pfn(page + i));
- else if (anon_exclusive)
- swp_entry = make_readable_exclusive_migration_entry(
- page_to_pfn(page + i));
- else
- swp_entry = make_readable_migration_entry(
- page_to_pfn(page + i));
- if (young)
- swp_entry = make_migration_entry_young(swp_entry);
- if (dirty)
- swp_entry = make_migration_entry_dirty(swp_entry);
- entry = swp_entry_to_pte(swp_entry);
- if (soft_dirty)
- entry = pte_swp_mksoft_dirty(entry);
- if (uffd_wp)
- entry = pte_swp_mkuffd_wp(entry);
-
+ if (freeze || is_migration_entry(swp_entry)) {
+ if (write)
+ swp_entry = make_writable_migration_entry(
+ page_to_pfn(page + i));
+ else if (anon_exclusive)
+ swp_entry = make_readable_exclusive_migration_entry(
+ page_to_pfn(page + i));
+ else
+ swp_entry = make_readable_migration_entry(
+ page_to_pfn(page + i));
+ if (young)
+ swp_entry = make_migration_entry_young(swp_entry);
+ if (dirty)
+ swp_entry = make_migration_entry_dirty(swp_entry);
+ entry = swp_entry_to_pte(swp_entry);
+ if (soft_dirty)
+ entry = pte_swp_mksoft_dirty(entry);
+ if (uffd_wp)
+ entry = pte_swp_mkuffd_wp(entry);
+ } else {
+ /*
+ * anon_exclusive was already propagated to the relevant
+ * pages corresponding to the pte entries when freeze
+ * is false.
+ */
+ if (write)
+ swp_entry = make_writable_device_private_entry(
+ page_to_pfn(page + i));
+ else
+ swp_entry = make_readable_device_private_entry(
+ page_to_pfn(page + i));
+ /*
+			 * Young and dirty bits are not propagated via swp_entry
+ */
+ entry = swp_entry_to_pte(swp_entry);
+ if (soft_dirty)
+ entry = pte_swp_mksoft_dirty(entry);
+ if (uffd_wp)
+ entry = pte_swp_mkuffd_wp(entry);
+ }
VM_WARN_ON(!pte_none(ptep_get(pte + i)));
set_pte_at(mm, addr, pte + i, entry);
}
@@ -3046,7 +3202,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
}
pte_unmap(pte);
- if (!pmd_migration)
+ if (present)
folio_remove_rmap_pmd(folio, page, vma);
if (freeze)
put_page(page);
@@ -3058,8 +3214,10 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmd, bool freeze)
{
+
VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
- if (pmd_trans_huge(*pmd) || is_pmd_migration_entry(*pmd))
+ if (pmd_trans_huge(*pmd) || is_pmd_migration_entry(*pmd) ||
+ (is_pmd_device_private_entry(*pmd)))
__split_huge_pmd_locked(vma, pmd, address, freeze);
}
@@ -3238,6 +3396,9 @@ static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,
VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio);
lockdep_assert_held(&lruvec->lru_lock);
+ if (folio_is_device_private(folio))
+ return;
+
if (list) {
/* page reclaim is reclaiming a huge page */
VM_WARN_ON(folio_test_lru(folio));
@@ -3252,6 +3413,7 @@ static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,
list_add_tail(&new_folio->lru, &folio->lru);
folio_set_lru(new_folio);
}
+
}
/* Racy check whether the huge page can be split */
@@ -3727,7 +3889,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
/* Prevent deferred_split_scan() touching ->_refcount */
spin_lock(&ds_queue->split_queue_lock);
- if (folio_ref_freeze(folio, 1 + extra_pins)) {
+ if (folio_ref_freeze(folio, 1 + folio_expected_ref_count(folio))) {
struct address_space *swap_cache = NULL;
struct lruvec *lruvec;
int expected_refs;
@@ -4603,7 +4765,10 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
return 0;
flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
- pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
+ if (unlikely(is_pmd_device_private_entry(*pvmw->pmd)))
+ pmdval = pmdp_huge_clear_flush(vma, address, pvmw->pmd);
+ else
+ pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
/* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
@@ -4653,6 +4818,17 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
entry = pmd_to_swp_entry(*pvmw->pmd);
folio_get(folio);
pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));
+
+ if (folio_is_device_private(folio)) {
+ if (pmd_write(pmde))
+ entry = make_writable_device_private_entry(
+ page_to_pfn(new));
+ else
+ entry = make_readable_device_private_entry(
+ page_to_pfn(new));
+ pmde = swp_entry_to_pmd(entry);
+ }
+
if (pmd_swp_soft_dirty(*pvmw->pmd))
pmde = pmd_mksoft_dirty(pmde);
if (is_writable_migration_entry(entry))
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index e981a1a292d2..246e6c211f34 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -250,12 +250,11 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
pvmw->ptl = pmd_lock(mm, pvmw->pmd);
pmde = *pvmw->pmd;
if (!pmd_present(pmde)) {
- swp_entry_t entry;
+ swp_entry_t entry = pmd_to_swp_entry(pmde);
if (!thp_migration_supported() ||
!(pvmw->flags & PVMW_MIGRATION))
return not_found(pvmw);
- entry = pmd_to_swp_entry(pmde);
if (!is_migration_entry(entry) ||
!check_pmd(swp_offset_pfn(entry), pvmw))
return not_found(pvmw);
@@ -277,6 +276,16 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
* cannot return prematurely, while zap_huge_pmd() has
* cleared *pmd but not decremented compound_mapcount().
*/
+ swp_entry_t entry;
+
+ entry = pmd_to_swp_entry(pmde);
+
+ if (is_device_private_entry(entry) &&
+ (pvmw->flags & PVMW_THP_DEVICE_PRIVATE)) {
+ pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+ return true;
+ }
+
if ((pvmw->flags & PVMW_SYNC) &&
thp_vma_suitable_order(vma, pvmw->address,
PMD_ORDER) &&
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 567e2d084071..604e8206a2ec 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -292,6 +292,12 @@ pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
*pmdvalp = pmdval;
if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
goto nomap;
+ if (is_swap_pmd(pmdval)) {
+ swp_entry_t entry = pmd_to_swp_entry(pmdval);
+
+ if (is_device_private_entry(entry))
+ goto nomap;
+ }
if (unlikely(pmd_trans_huge(pmdval)))
goto nomap;
if (unlikely(pmd_bad(pmdval))) {
diff --git a/mm/rmap.c b/mm/rmap.c
index f93ce27132ab..5c5c1c777ce3 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2281,7 +2281,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
- DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+ DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address,
+ PVMW_THP_DEVICE_PRIVATE);
bool anon_exclusive, writable, ret = true;
pte_t pteval;
struct page *subpage;
@@ -2326,6 +2327,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
while (page_vma_mapped_walk(&pvmw)) {
/* PMD-mapped THP migration entry */
if (!pvmw.pte) {
+ unsigned long pfn;
+
if (flags & TTU_SPLIT_HUGE_PMD) {
split_huge_pmd_locked(vma, pvmw.address,
pvmw.pmd, true);
@@ -2334,8 +2337,21 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
break;
}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
- subpage = folio_page(folio,
- pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
+ /*
+ * Zone device private folios do not work well with
+ * pmd_pfn() on some architectures due to pte
+ * inversion.
+ */
+ if (is_pmd_device_private_entry(*pvmw.pmd)) {
+ swp_entry_t entry = pmd_to_swp_entry(*pvmw.pmd);
+
+ pfn = swp_offset_pfn(entry);
+ } else {
+ pfn = pmd_pfn(*pvmw.pmd);
+ }
+
+ subpage = folio_page(folio, pfn - folio_pfn(folio));
+
VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
!folio_test_pmd_mappable(folio), folio);
--
2.50.1