Message-ID: <070ba06d-c32c-41ad-af0b-4bbc5e59f085@lucifer.local>
Date: Fri, 18 Jul 2025 17:40:52 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Dev Jain <dev.jain@....com>
Cc: akpm@...ux-foundation.org, ryan.roberts@....com, david@...hat.com,
willy@...radead.org, linux-mm@...ck.org, linux-kernel@...r.kernel.org,
catalin.marinas@....com, will@...nel.org, Liam.Howlett@...cle.com,
vbabka@...e.cz, jannh@...gle.com, anshuman.khandual@....com,
peterx@...hat.com, joey.gouly@....com, ioworker0@...il.com,
baohua@...nel.org, kevin.brodsky@....com, quic_zhenhuah@...cinc.com,
christophe.leroy@...roup.eu, yangyicong@...ilicon.com,
linux-arm-kernel@...ts.infradead.org, hughd@...gle.com,
yang@...amperecomputing.com, ziy@...dia.com
Subject: Re: [PATCH v5 2/7] mm: Optimize mprotect() for MM_CP_PROT_NUMA by
batch-skipping PTEs
On Fri, Jul 18, 2025 at 02:32:39PM +0530, Dev Jain wrote:
> For the MM_CP_PROT_NUMA skipping case, observe that if we skip an
> iteration because the underlying folio satisfies any of the skip
> conditions, then the iteration will also be skipped for all subsequent
> PTEs mapping the same folio. Therefore, we can optimize by using
> folio_pte_batch() to batch-skip those iterations.
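To make the idea above concrete, here is a minimal userspace C sketch of
the batch-skip pattern (illustrative only: batch_same_folio() and
folio_id are hypothetical stand-ins for folio_pte_batch() and the folio
each PTE maps; this is not the kernel code):

#include <stdio.h>

/*
 * Hypothetical stand-in for folio_pte_batch(): count how many
 * consecutive entries starting at i map the same "folio".
 */
static int batch_same_folio(const int *folio_id, int i, int n)
{
	int nr = 1;

	while (i + nr < n && folio_id[i + nr] == folio_id[i])
		nr++;
	return nr;
}

int main(void)
{
	/* Three entries mapping folio 7, two mapping 9, one mapping 3. */
	int folio_id[] = { 7, 7, 7, 9, 9, 3 };
	int n = 6, i = 0;

	while (i < n) {
		/*
		 * Run the skip decision once per folio, then advance the
		 * iterator by the whole batch, analogous to the patch's
		 * pte += nr_ptes step.
		 */
		int nr = batch_same_folio(folio_id, i, n);

		printf("entry %d: folio %d, skipping %d entries\n",
		       i, folio_id[i], nr);
		i += nr;
	}
	return 0;
}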
>
> Use prot_numa_skip(), introduced in the previous patch, to determine
> whether we need to skip the iteration. Change its signature to take a
> double pointer to a folio, which will be used by
> mprotect_folio_pte_batch() to determine the number of iterations we can
> safely skip.
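The out-parameter shape described here can be sketched in isolation;
below is a hedged userspace illustration of the return-bool-plus-double-
pointer pattern (folio_stub, should_skip() and the nid check are invented
for illustration, not the kernel's types or logic):

#include <stdbool.h>
#include <stdio.h>

struct folio_stub {
	int nid;	/* stand-in for struct folio */
};

/*
 * Returns true if the caller should skip this entry. *foliop is set on
 * every exit path, so the caller can batch over the folio even when
 * skipping (it stays NULL if no folio was found).
 */
static bool should_skip(struct folio_stub *f, int target_node,
			struct folio_stub **foliop)
{
	bool ret = true;

	if (!f)
		goto skip;
	if (f->nid == target_node)
		goto skip;

	ret = false;	/* all checks passed: do not skip */
skip:
	*foliop = f;
	return ret;
}

int main(void)
{
	struct folio_stub f = { .nid = 1 };
	struct folio_stub *folio;

	if (should_skip(&f, 1, &folio))
		printf("skipping; folio (nid=%d) still available for batching\n",
		       folio->nid);
	return 0;
}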
On reflection I don't think this is too bad; it seems neat in this version
at least :)
>
> Signed-off-by: Dev Jain <dev.jain@....com>
Overall looks good to me, so:
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
> ---
> mm/mprotect.c | 55 +++++++++++++++++++++++++++++++++++++++------------
> 1 file changed, 42 insertions(+), 13 deletions(-)
>
> diff --git a/mm/mprotect.c b/mm/mprotect.c
> index 2a9c73bd0778..97adc62c50ab 100644
> --- a/mm/mprotect.c
> +++ b/mm/mprotect.c
> @@ -83,28 +83,43 @@ bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
> return pte_dirty(pte);
> }
>
> +static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
> + pte_t pte, int max_nr_ptes)
> +{
> + /* No underlying folio, so cannot batch */
> + if (!folio)
> + return 1;
> +
> + if (!folio_test_large(folio))
> + return 1;
> +
> + return folio_pte_batch(folio, ptep, pte, max_nr_ptes);
> +}
> +
> static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
> - pte_t oldpte, pte_t *pte, int target_node)
> + pte_t oldpte, pte_t *pte, int target_node,
> + struct folio **foliop)
> {
> - struct folio *folio;
> + struct folio *folio = NULL;
> + bool ret = true;
> bool toptier;
> int nid;
>
> /* Avoid TLB flush if possible */
> if (pte_protnone(oldpte))
> - return true;
> + goto skip;
>
> folio = vm_normal_folio(vma, addr, oldpte);
> if (!folio)
> - return true;
> + goto skip;
>
> if (folio_is_zone_device(folio) || folio_test_ksm(folio))
> - return true;
> + goto skip;
>
> /* Also skip shared copy-on-write pages */
> if (is_cow_mapping(vma->vm_flags) &&
> (folio_maybe_dma_pinned(folio) || folio_maybe_mapped_shared(folio)))
> - return true;
> + goto skip;
>
> /*
> * While migration can move some dirty pages,
> @@ -112,7 +127,7 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
> * context.
> */
> if (folio_is_file_lru(folio) && folio_test_dirty(folio))
> - return true;
> + goto skip;
>
> /*
> * Don't mess with PTEs if page is already on the node
> @@ -120,7 +135,7 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
> */
> nid = folio_nid(folio);
> if (target_node == nid)
> - return true;
> + goto skip;
>
> toptier = node_is_toptier(nid);
>
> @@ -129,11 +144,15 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
> * balancing is disabled
> */
> if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && toptier)
> - return true;
> + goto skip;
>
> + ret = false;
> if (folio_use_access_time(folio))
> folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));
> - return false;
> +
> +skip:
> + *foliop = folio;
> + return ret;
> }
>
> static long change_pte_range(struct mmu_gather *tlb,
> @@ -147,6 +166,7 @@ static long change_pte_range(struct mmu_gather *tlb,
> bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
> bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
> bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
> + int nr_ptes;
>
> tlb_change_page_size(tlb, PAGE_SIZE);
> pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
> @@ -161,8 +181,11 @@ static long change_pte_range(struct mmu_gather *tlb,
> flush_tlb_batched_pending(vma->vm_mm);
> arch_enter_lazy_mmu_mode();
> do {
> + nr_ptes = 1;
> oldpte = ptep_get(pte);
> if (pte_present(oldpte)) {
> + int max_nr_ptes = (end - addr) >> PAGE_SHIFT;
> + struct folio *folio;
> pte_t ptent;
>
> /*
> @@ -170,9 +193,15 @@ static long change_pte_range(struct mmu_gather *tlb,
> * pages. See similar comment in change_huge_pmd.
> */
> if (prot_numa) {
> - if (prot_numa_skip(vma, addr, oldpte, pte,
> - target_node))
> + int ret = prot_numa_skip(vma, addr, oldpte, pte,
> + target_node, &folio);
> + if (ret) {
> +
> + /* determine batch to skip */
> + nr_ptes = mprotect_folio_pte_batch(folio,
> + pte, oldpte, max_nr_ptes);
> continue;
> + }
> }
>
> oldpte = ptep_modify_prot_start(vma, addr, pte);
> @@ -289,7 +318,7 @@ static long change_pte_range(struct mmu_gather *tlb,
> pages++;
> }
> }
> - } while (pte++, addr += PAGE_SIZE, addr != end);
> + } while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end);
> arch_leave_lazy_mmu_mode();
> pte_unmap_unlock(pte - 1, ptl);
>
> --
> 2.30.2
>