Message-ID: <fe823cc1-e4d4-450f-889f-57339a4dd967@arm.com>
Date: Fri, 13 Jun 2025 09:54:28 +0530
From: Dev Jain <dev.jain@....com>
To: David Hildenbrand <david@...hat.com>, akpm@...ux-foundation.org
Cc: Liam.Howlett@...cle.com, lorenzo.stoakes@...cle.com, vbabka@...e.cz,
jannh@...gle.com, pfalcato@...e.de, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, peterx@...hat.com, ryan.roberts@....com,
mingo@...nel.org, libang.li@...group.com, maobibo@...ngson.cn,
zhengqi.arch@...edance.com, baohua@...nel.org, anshuman.khandual@....com,
willy@...radead.org, ioworker0@...il.com, yang@...amperecomputing.com,
baolin.wang@...ux.alibaba.com, ziy@...dia.com, hughd@...gle.com
Subject: Re: [PATCH v4 2/2] mm: Optimize mremap() by PTE batching
On 11/06/25 7:30 pm, David Hildenbrand wrote:
> On 10.06.25 05:50, Dev Jain wrote:
>> Use folio_pte_batch() to optimize move_ptes(). On arm64, if the ptes
>> are painted with the contig bit, then ptep_get() will iterate through
>> all 16 entries to collect a/d bits. Hence this optimization will
>> result in a 16x reduction in the number of ptep_get() calls. Next,
>> ptep_get_and_clear() will eventually call contpte_try_unfold() on
>> every contig block, thus flushing the TLB for the complete large
>> folio range. Instead, use get_and_clear_full_ptes() so as to elide
>> TLBIs on each contig block, and only do them on the starting and
>> ending contig block.
>>
>> For split folios, there will be no pte batching; nr_ptes will be 1.
>> For pagetable splitting, the ptes will still point to the same large
>> folio; for arm64, this results in the optimization described above,
>> and for other arches (including the general case), a minor
>> improvement is expected due to a reduction in the number of function
>> calls.
>>
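
(Not part of the patch, just to make the numbers above concrete: assuming
arm64 with 4K base pages and a fully contpte-mapped 64K folio, i.e. 16 ptes
per contig block, the move_ptes() loop goes from

	16 iterations per contig block:
		ptep_get()		/* sweeps all 16 entries for a/d bits */
		ptep_get_and_clear()	/* the first of these unfolds the block -> TLBI */
		set_pte_at()

to

	1 iteration per contig block:
		ptep_get() + mremap_folio_pte_batch()	/* nr_ptes == 16 */
		get_and_clear_full_ptes()	/* TLBIs only on the starting and
						   ending contig block */
		set_ptes()

hence the 16x reduction in the ptep_get() calls made by this loop.)
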
>> Signed-off-by: Dev Jain <dev.jain@....com>
>> ---
>> mm/mremap.c | 39 ++++++++++++++++++++++++++++++++-------
>> 1 file changed, 32 insertions(+), 7 deletions(-)
>>
>> diff --git a/mm/mremap.c b/mm/mremap.c
>> index 180b12225368..18b215521ada 100644
>> --- a/mm/mremap.c
>> +++ b/mm/mremap.c
>> @@ -170,6 +170,23 @@ static pte_t move_soft_dirty_pte(pte_t pte)
>> return pte;
>> }
>> +static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr,
>> + pte_t *ptep, pte_t pte, int max_nr)
>> +{
>> + const fpb_t flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
>> + struct folio *folio;
>> +
>> + if (max_nr == 1)
>> + return 1;
>> +
>> + folio = vm_normal_folio(vma, addr, pte);
>> + if (!folio || !folio_test_large(folio))
>> + return 1;
>> +
>> + return folio_pte_batch(folio, addr, ptep, pte, max_nr, flags, NULL,
>> + NULL, NULL);
>> +}
>> +
>> static int move_ptes(struct pagetable_move_control *pmc,
>> unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd)
>> {
>> @@ -177,7 +194,7 @@ static int move_ptes(struct pagetable_move_control *pmc,
>> bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
>> struct mm_struct *mm = vma->vm_mm;
>> pte_t *old_ptep, *new_ptep;
>> - pte_t pte;
>> + pte_t old_pte, pte;
>> pmd_t dummy_pmdval;
>> spinlock_t *old_ptl, *new_ptl;
>> bool force_flush = false;
>> @@ -185,6 +202,8 @@ static int move_ptes(struct pagetable_move_control *pmc,
>> unsigned long new_addr = pmc->new_addr;
>> unsigned long old_end = old_addr + extent;
>> unsigned long len = old_end - old_addr;
>> + int max_nr_ptes;
>> + int nr_ptes;
>> int err = 0;
>> /*
>> @@ -236,14 +255,16 @@ static int move_ptes(struct pagetable_move_control *pmc,
>> flush_tlb_batched_pending(vma->vm_mm);
>> arch_enter_lazy_mmu_mode();
>> - for (; old_addr < old_end; old_ptep++, old_addr += PAGE_SIZE,
>> - new_ptep++, new_addr += PAGE_SIZE) {
>> + for (; old_addr < old_end; old_ptep += nr_ptes, old_addr += nr_ptes * PAGE_SIZE,
>> + new_ptep += nr_ptes, new_addr += nr_ptes * PAGE_SIZE) {
>> VM_WARN_ON_ONCE(!pte_none(*new_ptep));
>> - if (pte_none(ptep_get(old_ptep)))
>> + nr_ptes = 1;
>> + max_nr_ptes = (old_end - old_addr) >> PAGE_SHIFT;
>> + old_pte = ptep_get(old_ptep);
>> + if (pte_none(old_pte))
>> continue;
>> - pte = ptep_get_and_clear(mm, old_addr, old_ptep);
>> /*
>> * If we are remapping a valid PTE, make sure
>> * to flush TLB before we drop the PTL for the
>> @@ -255,8 +276,12 @@ static int move_ptes(struct pagetable_move_control *pmc,
>> * the TLB entry for the old mapping has been
>> * flushed.
>> */
>> - if (pte_present(pte))
>> + if (pte_present(old_pte)) {
>> + nr_ptes = mremap_folio_pte_batch(vma, old_addr, old_ptep,
>> + old_pte, max_nr_ptes);
>> force_flush = true;
>> + }
>> + pte = get_and_clear_full_ptes(mm, old_addr, old_ptep, nr_ptes, 0);
>> pte = move_pte(pte, old_addr, new_addr);
>> pte = move_soft_dirty_pte(pte);
>> @@ -269,7 +294,7 @@ static int move_ptes(struct pagetable_move_control *pmc,
>> else if (is_swap_pte(pte))
>> pte = pte_swp_clear_uffd_wp(pte);
>> }
>> - set_pte_at(mm, new_addr, new_ptep, pte);
>> + set_ptes(mm, new_addr, new_ptep, pte, nr_ptes);
>
>
> What I dislike is that some paths work on a single PTE, and we
> implicitly have to know that they don't apply for !pte_present.
>
> Like
> if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
>
> Will not get batched yet. And that is hidden inside the
> pte_marker_uffd_wp check ...
>
> Should we properly separate both paths (present vs. !present), and
> while at it, do some more cleanups? I'm thinking of the following on
> top (only compile-tested)
Good observation! Just one doubt, see below.
>
>
> diff --git a/mm/mremap.c b/mm/mremap.c
> index 18b215521adae..b88abf02b34e0 100644
> --- a/mm/mremap.c
> +++ b/mm/mremap.c
> @@ -155,21 +155,6 @@ static void drop_rmap_locks(struct vm_area_struct *vma)
> i_mmap_unlock_write(vma->vm_file->f_mapping);
> }
>
> -static pte_t move_soft_dirty_pte(pte_t pte)
> -{
> - /*
> - * Set soft dirty bit so we can notice
> - * in userspace the ptes were moved.
> - */
> -#ifdef CONFIG_MEM_SOFT_DIRTY
> - if (pte_present(pte))
> - pte = pte_mksoft_dirty(pte);
> - else if (is_swap_pte(pte))
> - pte = pte_swp_mksoft_dirty(pte);
> -#endif
> - return pte;
> -}
> -
> static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr,
> pte_t *ptep, pte_t pte, int max_nr)
> {
> @@ -260,7 +245,6 @@ static int move_ptes(struct pagetable_move_control *pmc,
> VM_WARN_ON_ONCE(!pte_none(*new_ptep));
>
> nr_ptes = 1;
> - max_nr_ptes = (old_end - old_addr) >> PAGE_SHIFT;
> old_pte = ptep_get(old_ptep);
> if (pte_none(old_pte))
> continue;
> @@ -277,24 +261,34 @@ static int move_ptes(struct pagetable_move_control *pmc,
> * flushed.
> */
> if (pte_present(old_pte)) {
> + max_nr_ptes = (old_end - old_addr) >> PAGE_SHIFT;
> nr_ptes = mremap_folio_pte_batch(vma, old_addr, old_ptep,
> old_pte, max_nr_ptes);
> force_flush = true;
> - }
> - pte = get_and_clear_full_ptes(mm, old_addr, old_ptep, nr_ptes, 0);
> - pte = move_pte(pte, old_addr, new_addr);
> - pte = move_soft_dirty_pte(pte);
> -
> - if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
> - pte_clear(mm, new_addr, new_ptep);
> - else {
> - if (need_clear_uffd_wp) {
> - if (pte_present(pte))
> - pte = pte_clear_uffd_wp(pte);
> - else if (is_swap_pte(pte))
> +
> + pte = get_and_clear_full_ptes(mm, old_addr, old_ptep,
> + nr_ptes, 0);
> + /*
> + * Moving present PTEs requires special care on some
> + * archs.
> + */
> + pte = move_pte(pte, old_addr, new_addr);
> + /* make userspace aware that this pte moved. */
> + pte = pte_mksoft_dirty(pte);
> + if (need_clear_uffd_wp)
> + pte = pte_clear_uffd_wp(pte);
> + set_ptes(mm, new_addr, new_ptep, pte, nr_ptes);
> + } else if (need_clear_uffd_wp && pte_marker_uffd_wp(pte)) {
> + pte_clear(mm, old_addr, old_ptep);
> + } else {
> + pte_clear(mm, old_addr, old_ptep);
Should pte_clear be included here? It is currently being done only for
the case need_clear_uffd_wp && pte_marker_uffd_wp().
> + if (is_swap_pte(pte)) {
> + if (need_clear_uffd_wp)
> pte = pte_swp_clear_uffd_wp(pte);
> + /* make userspace aware that this pte moved. */
> + pte = pte_swp_mksoft_dirty(pte);
> }
> - set_ptes(mm, new_addr, new_ptep, pte, nr_ptes);
> + set_pte_at(mm, new_addr, new_ptep, pte);
> }
> }
>
>
>
> Note that I don't know why we had the existing
>
> - if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
> - pte_clear(mm, new_addr, new_ptep);
>
>
> I thought we would always expect that the destination pte is already
> pte_none() ?
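
FWIW, for anyone who wants to exercise the move_ptes() path from userspace,
something along these lines should do it (rough, untested sketch; assumes 4K
base pages and that 64K mTHP is enabled, e.g. via
/sys/kernel/mm/transparent_hugepage/hugepages-64kB/enabled, and uses a length
smaller than PMD_SIZE so the kernel cannot move whole PMDs and has to move
individual ptes):

	#define _GNU_SOURCE
	#include <string.h>
	#include <sys/mman.h>

	#define LEN	(1UL << 20)	/* 1M < PMD_SIZE: forces the pte-level path */

	int main(void)
	{
		char *src, *dst, *moved;

		src = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (src == MAP_FAILED)
			return 1;
		madvise(src, LEN, MADV_HUGEPAGE);
		memset(src, 1, LEN);	/* fault the range in, hopefully as large folios */

		/* reserve a destination, then force the move onto it */
		dst = mmap(NULL, LEN, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (dst == MAP_FAILED)
			return 1;
		moved = mremap(src, LEN, LEN, MREMAP_MAYMOVE | MREMAP_FIXED, dst);
		return moved == MAP_FAILED;
	}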
>