Message-ID: <CAGsJ_4wyByWJqzsDGhx=4=Xs+3uUZt6PZdyVoUCUMAo350cm-g@mail.gmail.com>
Date: Wed, 25 Jun 2025 22:38:57 +1200
From: Barry Song <21cnbao@...il.com>
To: David Hildenbrand <david@...hat.com>
Cc: Lance Yang <ioworker0@...il.com>, akpm@...ux-foundation.org,
baolin.wang@...ux.alibaba.com, chrisl@...nel.org, kasong@...cent.com,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-riscv@...ts.infradead.org,
lorenzo.stoakes@...cle.com, ryan.roberts@....com, v-songbaohua@...o.com,
x86@...nel.org, ying.huang@...el.com, zhengtangquan@...o.com
Subject: Re: [PATCH v4 3/4] mm: Support batched unmap for lazyfree large
folios during reclamation
> > diff --git a/mm/rmap.c b/mm/rmap.c
> > index fb63d9256f09..241d55a92a47 100644
> > --- a/mm/rmap.c
> > +++ b/mm/rmap.c
> > @@ -1847,12 +1847,25 @@ void folio_remove_rmap_pud(struct folio *folio, struct page *page,
> >
> > /* We support batch unmapping of PTEs for lazyfree large folios */
> > static inline bool can_batch_unmap_folio_ptes(unsigned long addr,
> > - struct folio *folio, pte_t *ptep)
> > + struct folio *folio, pte_t *ptep,
> > + struct vm_area_struct *vma)
> > {
> > const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
> > + unsigned long next_pmd, vma_end, end_addr;
> > int max_nr = folio_nr_pages(folio);
> > pte_t pte = ptep_get(ptep);
> >
> > + /*
> > + * Limit the batch scan within a single VMA and within a single
> > + * page table.
> > + */
> > + vma_end = vma->vm_end;
> > + next_pmd = ALIGN(addr + 1, PMD_SIZE);
> > + end_addr = addr + (unsigned long)max_nr * PAGE_SIZE;
> > +
> > + if (end_addr > min(next_pmd, vma_end))
> > + return false;
>
> May I suggest that we clean all that up as we fix it?
>
> Maybe something like this:
>
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 3b74bb19c11dd..11fbddc6ad8d6 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1845,23 +1845,38 @@ void folio_remove_rmap_pud(struct folio *folio, struct page *page,
> #endif
> }
>
> -/* We support batch unmapping of PTEs for lazyfree large folios */
> -static inline bool can_batch_unmap_folio_ptes(unsigned long addr,
> - struct folio *folio, pte_t *ptep)
> +static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
> + struct page_vma_mapped_walk *pvmw, enum ttu_flags flags,
> + pte_t pte)
> {
> const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
> - int max_nr = folio_nr_pages(folio);
> - pte_t pte = ptep_get(ptep);
> + struct vm_area_struct *vma = pvmw->vma;
> + unsigned long end_addr, addr = pvmw->address;
> + unsigned int max_nr;
> +
> + if (flags & TTU_HWPOISON)
> + return 1;
> + if (!folio_test_large(folio))
> + return 1;
> +
> + /* We may only batch within a single VMA and a single page table. */
> + end_addr = min_t(unsigned long, ALIGN(addr + 1, PMD_SIZE), vma->vm_end);
Is this pmd_addr_end()?
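i.e., assuming I'm reading this right, something like the line below should
be equivalent, since pmd_addr_end() already clamps the next PMD boundary to
the end argument:

	end_addr = pmd_addr_end(addr, vma->vm_end);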
> + max_nr = (end_addr - addr) >> PAGE_SHIFT;
>
> + /* We only support lazyfree batching for now ... */
> if (!folio_test_anon(folio) || folio_test_swapbacked(folio))
> - return false;
> + return 1;
> if (pte_unused(pte))
> - return false;
> - if (pte_pfn(pte) != folio_pfn(folio))
> - return false;
> + return 1;
> + /* ... where we must be able to batch the whole folio. */
> + if (pte_pfn(pte) != folio_pfn(folio) || max_nr != folio_nr_pages(folio))
> + return 1;
> + max_nr = folio_pte_batch(folio, addr, pvmw->pte, pte, max_nr, fpb_flags,
> + NULL, NULL, NULL);
>
> - return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
> - NULL, NULL) == max_nr;
> + if (max_nr != folio_nr_pages(folio))
> + return 1;
> + return max_nr;
> }
>
> /*
> @@ -2024,9 +2039,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
> if (pte_dirty(pteval))
> folio_mark_dirty(folio);
> } else if (likely(pte_present(pteval))) {
> - if (folio_test_large(folio) && !(flags & TTU_HWPOISON) &&
> - can_batch_unmap_folio_ptes(address, folio, pvmw.pte))
> - nr_pages = folio_nr_pages(folio);
> + nr_pages = folio_unmap_pte_batch(folio, &pvmw, flags, pteval);
> end_addr = address + nr_pages * PAGE_SIZE;
> flush_cache_range(vma, address, end_addr);
>
>
> Note that I don't quite understand why we have to batch the whole thing or fall back to
> individual pages. Why can't we perform batches that span only some of the PTEs? What's
> special about 1 PTE vs. 2 PTEs vs. all PTEs?
>
>
> Can someone enlighten me why that is required?
It's probably not a strict requirement. My thinking was that cases where
the batch count is greater than 1 but still less than folio_nr_pages()
are unlikely to provide much practical benefit, except perhaps in very
rare edge cases, since the MADV_FREE path (madvise_free_pte_range())
already calls split_folio() on large folios that are not fully mapped:
if (folio_test_large(folio)) {
        bool any_young, any_dirty;

        nr = madvise_folio_pte_batch(addr, end, folio, pte, ptent,
                                     &any_young, &any_dirty);
        if (nr < folio_nr_pages(folio)) {
                ...
                err = split_folio(folio);
                ...
        }
}
Another reason is that when we extend this to non-lazyfree anonymous
folios [1], things get complicated: checking anon_exclusive and calling
folio_try_share_anon_rmap_pte() for the right number of PTEs becomes
tricky when a folio is partially exclusive and partially shared.
[1] https://lore.kernel.org/linux-mm/20250513084620.58231-1-21cnbao@gmail.com/
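For example, a rough, hypothetical sketch of the per-page bookkeeping a
partial batch would need (the loop and fallback below are illustrative
only, not proposed code):

	/* each subpage carries its own AnonExclusive state */
	for (i = 0; i < nr_pages; i++) {
		struct page *page = folio_page(folio, i);

		if (PageAnonExclusive(page) &&
		    folio_try_share_anon_rmap_pte(folio, page))
			/* sharing failed: would need to fall back to
			 * unmapping this PTE individually */
			...
	}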
>
> --
> Cheers,
>
> David / dhildenb
>
Thanks
Barry