Message-ID: <20250703222759.1943776-6-balbirs@nvidia.com>
Date: Fri, 4 Jul 2025 08:27:52 +1000
From: Balbir Singh <balbirs@...dia.com>
To: linux-mm@...ck.org
Cc: akpm@...ux-foundation.org,
	linux-kernel@...r.kernel.org,
	Balbir Singh <balbirs@...dia.com>
Subject: [v1 05/12] mm/memory/fault: add support for zone device THP fault handling

When the CPU touches a zone device THP entry, the data needs to be
migrated back to the CPU. Handle this by calling migrate_to_ram()
on these pages via the new do_huge_pmd_device_private() fault
handling helper.
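
A device driver opts into this path through the migrate_to_ram()
callback it already provides in its dev_pagemap_ops; the helper
below invokes that callback with vmf->page set to the faulting
device private page. A minimal driver-side sketch (the my_drv_*
names are illustrative, not part of this patch):

	static vm_fault_t my_drv_migrate_to_ram(struct vm_fault *vmf)
	{
		/*
		 * A real driver would set up a migrate_vma covering
		 * the faulting range and copy the device memory back
		 * to system RAM; failing the fault is the minimal
		 * correct fallback.
		 */
		return VM_FAULT_SIGBUS;
	}

	static const struct dev_pagemap_ops my_drv_pagemap_ops = {
		.migrate_to_ram = my_drv_migrate_to_ram,
	};

Note that the helper takes the page lock and a reference under the
PMD lock, then drops the PMD lock before calling into the driver,
since migrate_to_ram() may sleep.
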
Signed-off-by: Balbir Singh <balbirs@...dia.com>
---
 include/linux/huge_mm.h |  7 +++++++
 mm/huge_memory.c        | 40 ++++++++++++++++++++++++++++++++++++++++
 mm/memory.c             |  6 ++++--
 3 files changed, 51 insertions(+), 2 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 4d5bb67dc4ec..65a1bdf29bb9 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -474,6 +474,8 @@ static inline bool folio_test_pmd_mappable(struct folio *folio)
 
 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
 
+vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf);
+
 extern struct folio *huge_zero_folio;
 extern unsigned long huge_zero_pfn;
 
@@ -627,6 +629,11 @@ static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	return 0;
 }
 
+static inline vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf)
+{
+	return 0;
+}
+
 static inline bool is_huge_zero_folio(const struct folio *folio)
 {
 	return false;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e6e390d0308f..f29add796931 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1267,6 +1267,46 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 
 }
 
+vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+	vm_fault_t ret = 0;
+	spinlock_t *ptl;
+	swp_entry_t swp_entry;
+	struct page *page;
+
+	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
+		return VM_FAULT_FALLBACK;
+
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+		vma_end_read(vma);
+		return VM_FAULT_RETRY;
+	}
+
+	ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) {
+		spin_unlock(ptl);
+		return 0;
+	}
+
+	swp_entry = pmd_to_swp_entry(vmf->orig_pmd);
+	page = pfn_swap_entry_to_page(swp_entry);
+	vmf->page = page;
+	vmf->pte = NULL;
+	if (trylock_page(vmf->page)) {
+		get_page(page);
+		spin_unlock(ptl);
+		ret = page_pgmap(page)->ops->migrate_to_ram(vmf);
+		unlock_page(vmf->page);
+		put_page(page);
+	} else {
+		spin_unlock(ptl);
+	}
+
+	return ret;
+}
+
 /*
  * always: directly stall for all thp allocations
  * defer: wake kswapd and fail if not immediately available
diff --git a/mm/memory.c b/mm/memory.c
index 0f9b32a20e5b..c26c421b8325 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6165,8 +6165,10 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
 
 		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
-			VM_BUG_ON(thp_migration_supported() &&
-				  !is_pmd_migration_entry(vmf.orig_pmd));
+			if (is_device_private_entry(
+					pmd_to_swp_entry(vmf.orig_pmd)))
+				return do_huge_pmd_device_private(&vmf);
+
 			if (is_pmd_migration_entry(vmf.orig_pmd))
 				pmd_migration_entry_wait(mm, vmf.pmd);
 			return 0;
--
2.49.0