Message-Id: <20250628113435.46678-2-dev.jain@arm.com>
Date: Sat, 28 Jun 2025 17:04:32 +0530
From: Dev Jain <dev.jain@....com>
To: akpm@...ux-foundation.org
Cc: ryan.roberts@....com,
david@...hat.com,
willy@...radead.org,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
catalin.marinas@....com,
will@...nel.org,
Liam.Howlett@...cle.com,
lorenzo.stoakes@...cle.com,
vbabka@...e.cz,
jannh@...gle.com,
anshuman.khandual@....com,
peterx@...hat.com,
joey.gouly@....com,
ioworker0@...il.com,
baohua@...nel.org,
kevin.brodsky@....com,
quic_zhenhuah@...cinc.com,
christophe.leroy@...roup.eu,
yangyicong@...ilicon.com,
linux-arm-kernel@...ts.infradead.org,
hughd@...gle.com,
yang@...amperecomputing.com,
ziy@...dia.com,
Dev Jain <dev.jain@....com>
Subject: [PATCH v4 1/4] mm: Optimize mprotect() for MM_CP_PROT_NUMA by batch-skipping PTEs
In the case of prot_numa, there are several conditions under which we can
skip to the next iteration. Since these skip conditions are based on the
folio rather than on the individual PTEs, we can skip an entire PTE batch
mapping that folio. Additionally, refactor all of this into a new function
to clean up the existing code.
Signed-off-by: Dev Jain <dev.jain@....com>
---
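Not part of the patch proper: below is a minimal userspace sketch of the
loop shape this change produces, included here for review convenience. The
toy_pte_batch() helper and the plain-int "PTE" array are made-up stand-ins
for folio_pte_batch() and real page tables; the only point illustrated is
that the iterator now advances by nr_ptes per step instead of by a fixed one.

#include <stdio.h>

#define NR_PTES 8

/* Toy "PTE": a folio id; equal adjacent ids model one large folio. */
static int toy_pte_batch(const int *ptes, int idx, int max_nr)
{
	int nr = 1;

	/* Count consecutive entries of the same folio, like folio_pte_batch(). */
	while (nr < max_nr && ptes[idx + nr] == ptes[idx])
		nr++;
	return nr;
}

int main(void)
{
	int ptes[NR_PTES] = { 1, 1, 1, 1, 2, 3, 3, 2 };
	int nr_ptes;

	for (int i = 0; i < NR_PTES; i += nr_ptes) {
		nr_ptes = toy_pte_batch(ptes, i, NR_PTES - i);
		printf("batch of %d PTE(s) for folio %d at index %d\n",
		       nr_ptes, ptes[i], i);
	}
	return 0;
}

Against the array above this prints batches of 4, 1, 2 and 1 entries, i.e.
the four-entry folio is handled in a single step.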
mm/mprotect.c | 134 ++++++++++++++++++++++++++++++++------------------
1 file changed, 87 insertions(+), 47 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 88709c01177b..af10a7fbe6b8 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -83,6 +83,83 @@ bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
return pte_dirty(pte);
}
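+/*
+ * Return the number of consecutive PTEs, starting at ptep, that map pages
+ * of the same large folio. Returns 1 if there is no folio, the folio is
+ * not large, or only a single PTE remains in the range.
+ */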
+static int mprotect_folio_pte_batch(struct folio *folio, unsigned long addr,
+ pte_t *ptep, pte_t pte, int max_nr_ptes)
+{
+ const fpb_t flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
+
+ if (!folio || !folio_test_large(folio) || (max_nr_ptes == 1))
+ return 1;
+
+ return folio_pte_batch(folio, addr, ptep, pte, max_nr_ptes, flags,
+ NULL, NULL, NULL);
+}
+
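+/*
+ * Returns the number of PTEs the NUMA-hinting walk should skip (batched
+ * across a large folio where possible), or 0 if this PTE must be changed.
+ * On return, *foliop holds the folio looked up for oldpte, if any.
+ */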
+static int prot_numa_skip_ptes(struct folio **foliop, struct vm_area_struct *vma,
+ unsigned long addr, pte_t oldpte, pte_t *pte, int target_node,
+ int max_nr_ptes)
+{
+ struct folio *folio = NULL;
+ int nr_ptes = 1;
+ bool toptier;
+ int nid;
+
+ /* Avoid TLB flush if possible */
+ if (pte_protnone(oldpte))
+ goto skip_batch;
+
+ folio = vm_normal_folio(vma, addr, oldpte);
+ if (!folio)
+ goto skip_batch;
+
+ if (folio_is_zone_device(folio) || folio_test_ksm(folio))
+ goto skip_batch;
+
+ /* Also skip shared copy-on-write pages */
+ if (is_cow_mapping(vma->vm_flags) &&
+ (folio_maybe_dma_pinned(folio) || folio_maybe_mapped_shared(folio)))
+ goto skip_batch;
+
+ /*
+ * While migration can move some dirty pages,
+ * it cannot move them all from MIGRATE_ASYNC
+ * context.
+ */
+ if (folio_is_file_lru(folio) && folio_test_dirty(folio))
+ goto skip_batch;
+
+ /*
+ * Don't mess with PTEs if page is already on the node
+ * a single-threaded process is running on.
+ */
+ nid = folio_nid(folio);
+ if (target_node == nid)
+ goto skip_batch;
+
+ toptier = node_is_toptier(nid);
+
+ /*
+ * Skip scanning top tier node if normal numa
+ * balancing is disabled
+ */
+ if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && toptier)
+ goto skip_batch;
+
+ if (folio_use_access_time(folio))
+ folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));
+
+ /* Do not skip in this case */
+ nr_ptes = 0;
+ goto out;
+
+skip_batch:
+ nr_ptes = mprotect_folio_pte_batch(folio, addr, pte, oldpte, max_nr_ptes);
+out:
+ *foliop = folio;
+ return nr_ptes;
+}
+
static long change_pte_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
unsigned long end, pgprot_t newprot, unsigned long cp_flags)
@@ -94,6 +171,7 @@ static long change_pte_range(struct mmu_gather *tlb,
bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
+ int nr_ptes;
tlb_change_page_size(tlb, PAGE_SIZE);
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ -108,8 +186,11 @@ static long change_pte_range(struct mmu_gather *tlb,
flush_tlb_batched_pending(vma->vm_mm);
arch_enter_lazy_mmu_mode();
do {
+ nr_ptes = 1;
oldpte = ptep_get(pte);
if (pte_present(oldpte)) {
+ int max_nr_ptes = (end - addr) >> PAGE_SHIFT;
+ struct folio *folio = NULL;
pte_t ptent;
/*
@@ -117,53 +198,12 @@ static long change_pte_range(struct mmu_gather *tlb,
* pages. See similar comment in change_huge_pmd.
*/
if (prot_numa) {
- struct folio *folio;
- int nid;
- bool toptier;
-
- /* Avoid TLB flush if possible */
- if (pte_protnone(oldpte))
- continue;
-
- folio = vm_normal_folio(vma, addr, oldpte);
- if (!folio || folio_is_zone_device(folio) ||
- folio_test_ksm(folio))
- continue;
-
- /* Also skip shared copy-on-write pages */
- if (is_cow_mapping(vma->vm_flags) &&
- (folio_maybe_dma_pinned(folio) ||
- folio_maybe_mapped_shared(folio)))
- continue;
-
- /*
- * While migration can move some dirty pages,
- * it cannot move them all from MIGRATE_ASYNC
- * context.
- */
- if (folio_is_file_lru(folio) &&
- folio_test_dirty(folio))
- continue;
-
- /*
- * Don't mess with PTEs if page is already on the node
- * a single-threaded process is running on.
- */
- nid = folio_nid(folio);
- if (target_node == nid)
- continue;
- toptier = node_is_toptier(nid);
-
- /*
- * Skip scanning top tier node if normal numa
- * balancing is disabled
- */
- if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
- toptier)
+ nr_ptes = prot_numa_skip_ptes(&folio, vma,
+ addr, oldpte, pte,
+ target_node,
+ max_nr_ptes);
+ if (nr_ptes)
continue;
- if (folio_use_access_time(folio))
- folio_xchg_access_time(folio,
- jiffies_to_msecs(jiffies));
}
oldpte = ptep_modify_prot_start(vma, addr, pte);
@@ -280,7 +320,7 @@ static long change_pte_range(struct mmu_gather *tlb,
pages++;
}
}
- } while (pte++, addr += PAGE_SIZE, addr != end);
+ } while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);
--
2.30.2