Date: Tue, 30 Apr 2024 20:34:51 +1200
From: Barry Song <21cnbao@...il.com>
To: Lance Yang <ioworker0@...il.com>
Cc: akpm@...ux-foundation.org, willy@...radead.org, maskray@...gle.com, 
	ziy@...dia.com, ryan.roberts@....com, david@...hat.com, mhocko@...e.com, 
	fengwei.yin@...el.com, zokeefe@...gle.com, shy828301@...il.com, 
	xiehuan09@...il.com, libang.li@...group.com, wangkefeng.wang@...wei.com, 
	songmuchun@...edance.com, peterx@...hat.com, minchan@...nel.org, 
	linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 3/3] mm/vmscan: avoid split lazyfree THP during shrink_folio_list()

On Tue, Apr 30, 2024 at 1:23 AM Lance Yang <ioworker0@...il.com> wrote:
>
> When a user no longer requires the pages, they typically use
> madvise(MADV_FREE) to mark them as lazy free, and do not write to
> that memory again.
>
> During memory reclaim, if we detect that the large folio and its PMD
> are both still clean and there are no unexpected references (such as
> GUP), we can just discard the memory lazily, improving the efficiency
> of memory reclamation in this case.
>
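
For readers less familiar with the lazyfree pattern, here is a minimal
userspace sketch of the usage described above (my illustration, not
part of the patch):

#include <sys/mman.h>
#include <string.h>

#define SZ	(64UL << 20)

int main(void)
{
	char *buf = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0x5a, SZ);	/* use the memory */

	/*
	 * Done with the contents: mark the range lazy free. The mapping
	 * stays valid, and reclaim may now discard clean pages; touching
	 * a discarded page later reads back as zeroes.
	 */
	madvise(buf, SZ, MADV_FREE);
	return 0;
}
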
> On an Intel i5 CPU, reclaiming 1GiB of lazyfree THPs using
> mem_cgroup_force_empty() results in the following runtimes in seconds
> (shorter is better):
>
> --------------------------------------------
> |     Old       |      New       |  Change  |
> --------------------------------------------
> |   0.683426    |    0.049197    |  -92.80% |
> --------------------------------------------
>
> Suggested-by: Zi Yan <ziy@...dia.com>
> Suggested-by: David Hildenbrand <david@...hat.com>
> Signed-off-by: Lance Yang <ioworker0@...il.com>
> ---
>  include/linux/huge_mm.h |  2 ++
>  mm/huge_memory.c        | 75 +++++++++++++++++++++++++++++++++++++++++
>  mm/rmap.c               |  3 ++
>  3 files changed, 80 insertions(+)
>
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 2daadfcc6776..fd330f72b4f3 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -38,6 +38,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
>                     unsigned long cp_flags);
>  void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
>                            pmd_t *pmd, bool freeze, struct folio *folio);
> +bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
> +                          pmd_t *pmdp, struct folio *folio);
>
>  vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
>  vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 145505a1dd05..d35d526ed48f 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2690,6 +2690,81 @@ static void unmap_folio(struct folio *folio)
>         try_to_unmap_flush();
>  }
>
> +static bool __discard_trans_pmd_locked(struct vm_area_struct *vma,
> +                                      unsigned long addr, pmd_t *pmdp,
> +                                      struct folio *folio)
> +{
> +       struct mm_struct *mm = vma->vm_mm;
> +       int ref_count, map_count;
> +       pmd_t orig_pmd = *pmdp;
> +       struct mmu_gather tlb;
> +       struct page *page;
> +
> +       if (pmd_dirty(orig_pmd) || folio_test_dirty(folio))
> +               return false;
> +       if (unlikely(!pmd_present(orig_pmd) || !pmd_trans_huge(orig_pmd)))
> +               return false;
> +
> +       page = pmd_page(orig_pmd);
> +       if (unlikely(page_folio(page) != folio))
> +               return false;
> +
> +       tlb_gather_mmu(&tlb, mm);
> +       orig_pmd = pmdp_huge_get_and_clear(mm, addr, pmdp);
> +       tlb_remove_pmd_tlb_entry(&tlb, pmdp, addr);
> +
> +       /*
> +        * Syncing against concurrent GUP-fast:
> +        * - clear PMD; barrier; read refcount
> +        * - inc refcount; barrier; read PMD
> +        */
> +       smp_mb();
> +
> +       ref_count = folio_ref_count(folio);
> +       map_count = folio_mapcount(folio);
> +
> +       /*
> +        * Order reads for folio refcount and dirty flag
> +        * (see comments in __remove_mapping()).
> +        */
> +       smp_rmb();
> +
> +       /*
> +        * If the PMD or folio is redirtied at this point, or if there
> +        * are unexpected references, we give up on discarding the folio
> +        * and remap it instead.
> +        *
> +        * The only folio references should be the one taken at isolation
> +        * plus the rmap(s).
> +        */
> +       if (ref_count != map_count + 1 || folio_test_dirty(folio) ||
> +           pmd_dirty(orig_pmd)) {
> +               set_pmd_at(mm, addr, pmdp, orig_pmd);
> +               return false;
> +       }
> +
> +       folio_remove_rmap_pmd(folio, page, vma);
> +       zap_deposited_table(mm, pmdp);
> +       add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
> +       folio_put(folio);
> +
> +       return true;
> +}
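
The pairing with GUP-fast can be modelled in userspace; below is a
sketch with C11 atomics standing in for smp_mb() and the folio
refcount (all names here are mine, not kernel code):

#include <stdatomic.h>
#include <pthread.h>

static atomic_long pmd = 1;      /* 1: PMD present, 0: cleared   */
static atomic_long refcount = 1; /* the reference from isolation */

static void *reclaim_side(void *arg)
{
	atomic_store(&pmd, 0);                     /* pmdp_huge_get_and_clear() */
	atomic_thread_fence(memory_order_seq_cst); /* smp_mb()                  */
	if (atomic_load(&refcount) != 1)           /* unexpected reference?     */
		atomic_store(&pmd, 1);             /* give up: set_pmd_at()     */
	return NULL;
}

static void *gup_fast_side(void *arg)
{
	atomic_fetch_add(&refcount, 1);            /* speculative folio ref */
	atomic_thread_fence(memory_order_seq_cst); /* barrier in GUP-fast   */
	if (atomic_load(&pmd) == 0)                /* re-check the PMD      */
		atomic_fetch_sub(&refcount, 1);    /* back off              */
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, reclaim_side, NULL);
	pthread_create(&t2, NULL, gup_fast_side, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Either reclaim sees the raised refcount and restores the PMD, or
GUP-fast sees the cleared PMD and backs off; the two full barriers rule
out the case where both sides miss each other.
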
> +
> +bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
> +                          pmd_t *pmdp, struct folio *folio)
> +{
> +       VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
> +       VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
> +       VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
> +
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +       if (folio_test_anon(folio) && !folio_test_swapbacked(folio))
> +               return __discard_trans_pmd_locked(vma, addr, pmdp, folio);
> +#endif

this #ifdef is unnecessary: huge_memory.c is only built when
CONFIG_TRANSPARENT_HUGEPAGE=y:

mm/Makefile:
obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o

> +
> +       return false;
> +}
> +
>  static void remap_page(struct folio *folio, unsigned long nr)
>  {
>         int i = 0;
> diff --git a/mm/rmap.c b/mm/rmap.c
> index e42f436c7ff3..ab37af4f47aa 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1677,6 +1677,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>                 }
>
>                 if (!pvmw.pte && (flags & TTU_SPLIT_HUGE_PMD)) {
> +                       if (unmap_huge_pmd_locked(vma, range.start, pvmw.pmd,
> +                                                 folio))
> +                               goto walk_done;

this breaks the build when CONFIG_TRANSPARENT_HUGEPAGE is disabled,
since rmap.c is always built but huge_memory.c is not:
mm/rmap.c:1680: undefined reference to `unmap_huge_pmd_locked'
mm/rmap.c:1687: undefined reference to `split_huge_pmd_locked'
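
A possible fix (untested sketch) would be to drop the #ifdef from
unmap_huge_pmd_locked() and instead add a stub in the
!CONFIG_TRANSPARENT_HUGEPAGE branch of include/linux/huge_mm.h, like
the other helpers declared there:

static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp,
					 struct folio *folio)
{
	return false;
}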

>                         /*
>                          * We temporarily have to drop the PTL and start once
>                          * again from that now-PTE-mapped page table.
> --
> 2.33.1
>
