Date: Thu, 13 Jun 2024 14:55:21 +0800
From: Lance Yang <ioworker0@...il.com>
To: baolin.wang@...ux.alibaba.com,
	david@...hat.com
Cc: ioworker0@...il.com,
	21cnbao@...il.com,
	akpm@...ux-foundation.org,
	fengwei.yin@...el.com,
	libang.li@...group.com,
	linux-kernel@...r.kernel.org,
	linux-mm@...ck.org,
	maskray@...gle.com,
	mhocko@...e.com,
	minchan@...nel.org,
	peterx@...hat.com,
	ryan.roberts@....com,
	shy828301@...il.com,
	sj@...nel.org,
	songmuchun@...edance.com,
	wangkefeng.wang@...wei.com,
	willy@...radead.org,
	xiehuan09@...il.com,
	ziy@...dia.com,
	zokeefe@...gle.com
Subject: Re: [PATCH v7 4/4] mm/vmscan: avoid split lazyfree THP during shrink_folio_list()


On Mon, Jun 10, 2024 at 8:08 PM Lance Yang <ioworker0@...il.com> wrote:
>
[...]
> +static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
> +                                           unsigned long addr, pmd_t *pmdp,
> +                                           struct folio *folio)
> +{
> +       VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio);
> +       VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
> +
> +       struct mm_struct *mm = vma->vm_mm;
> +       int ref_count, map_count;
> +       pmd_t orig_pmd = *pmdp;
> +       struct page *page;
> +
> +       if (unlikely(!pmd_present(orig_pmd) || !pmd_trans_huge(orig_pmd)))
> +               return false;
> +
> +       page = pmd_page(orig_pmd);
> +       if (unlikely(page_folio(page) != folio))
> +               return false;
> +
> +       if (folio_test_dirty(folio) || pmd_dirty(orig_pmd)) {
> +               folio_set_swapbacked(folio);
> +               return false;
> +       }
> +
> +       orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
> +
> +       /*
> +        * Syncing against concurrent GUP-fast:
> +        * - clear PMD; barrier; read refcount
> +        * - inc refcount; barrier; read PMD
> +        */
> +       smp_mb();
> +
> +       ref_count = folio_ref_count(folio);
> +       map_count = folio_mapcount(folio);
> +
> +       /*
> +        * Order reads for folio refcount and dirty flag
> +        * (see comments in __remove_mapping()).
> +        */
> +       smp_rmb();
> +
> +       /*
> +        * If the folio or its PMD has been redirtied at this point, or
> +        * if there are unexpected references, we give up on discarding
> +        * this folio and remap it instead.
> +        *
> +        * The only expected folio refs are one from isolation plus the
> +        * rmap(s).
> +        */
> +       if (folio_test_dirty(folio) || pmd_dirty(orig_pmd))
> +               folio_set_swapbacked(folio);
> +
> +       if (folio_test_swapbacked(folio) || ref_count != map_count + 1) {
> +               set_pmd_at(mm, addr, pmdp, orig_pmd);
> +               return false;
> +       }
> +
> +       folio_remove_rmap_pmd(folio, page, vma);
> +       zap_deposited_table(mm, pmdp);
> +       add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
> +       if (vma->vm_flags & VM_LOCKED)
> +               mlock_drain_local();
> +       folio_put(folio);
> +
> +       return true;
> +}
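
(Aside: the smp_mb() above pairs with a barrier on the GUP-fast side;
each side does "write A; full barrier; read B", so at least one side is
guaranteed to observe the other's write. Below is a standalone userspace
analogue of that pairing, purely illustrative: the names are made up and
this is not the actual kernel or gup.c code.)

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int pmd_cleared;	/* stands in for the cleared PMD */
static atomic_int extra_ref;	/* stands in for a GUP-fast reference */

static void *unmap_side(void *arg)
{
	(void)arg;
	atomic_store(&pmd_cleared, 1);			/* clear PMD */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (atomic_load(&extra_ref))			/* read refcount */
		puts("unmap: unexpected ref, back off and remap");
	return NULL;
}

static void *gup_fast_side(void *arg)
{
	(void)arg;
	atomic_store(&extra_ref, 1);			/* inc refcount */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (atomic_load(&pmd_cleared))			/* re-read PMD */
		puts("gup-fast: PMD is gone, drop the reference");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, unmap_side, NULL);
	pthread_create(&b, NULL, gup_fast_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

With seq_cst ordering, the two threads cannot both miss each other's
write: either the unmap side sees the elevated refcount and backs off,
or GUP-fast sees the cleared PMD and releases its reference.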
[...]
> diff --git a/mm/rmap.c b/mm/rmap.c
> index b77f88695588..8e901636ade9 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1630,6 +1630,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>         enum ttu_flags flags = (enum ttu_flags)(long)arg;
>         unsigned long pfn;
>         unsigned long hsz = 0;
> +       bool pmd_mapped = false;
>
>         /*
>          * When racing against e.g. zap_pte_range() on another cpu,
> @@ -1676,16 +1677,24 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>                         goto walk_done_err;
>                 }
>
> -               if (!pvmw.pte && (flags & TTU_SPLIT_HUGE_PMD)) {
> -                       /*
> -                        * We temporarily have to drop the PTL and start once
> -                        * again from that now-PTE-mapped page table.
> -                        */
> -                       split_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
> -                                             false, folio);
> -                       flags &= ~TTU_SPLIT_HUGE_PMD;
> -                       page_vma_mapped_walk_restart(&pvmw);
> -                       continue;
> +               if (!pvmw.pte) {
> +                       pmd_mapped = true;
> +                       if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
> +                                                 folio))
> +                               goto walk_done;
> +
> +                       if (flags & TTU_SPLIT_HUGE_PMD) {
> +                               /*
> +                                * We temporarily have to drop the PTL and start
> +                                * once again from that now-PTE-mapped page
> +                                * table.
> +                                */
> +                               split_huge_pmd_locked(vma, pvmw.address,
> +                                                     pvmw.pmd, false, folio);
> +                               flags &= ~TTU_SPLIT_HUGE_PMD;
> +                               page_vma_mapped_walk_restart(&pvmw);
> +                               continue;
> +                       }
>                 }
>
>                 /* Unexpected PMD-mapped THP? */
> @@ -1813,7 +1822,12 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>                          */
>                         if (unlikely(folio_test_swapbacked(folio) !=
>                                         folio_test_swapcache(folio))) {
> -                               WARN_ON_ONCE(1);
> +                               /*
> +                                * unmap_huge_pmd_locked() will unmark a
> +                                * PMD-mapped folio as lazyfree if the folio or
> +                                * its PMD was redirtied.
> +                                */
> +                               WARN_ON_ONCE(!pmd_mapped);

Damn it. I forgot to remap the folio to the page table: the PTE has
already been cleared at this point, so taking the error path here leaves
the folio unmapped.

But it seems like we neither need to do that, nor need to unmark a
PMD-mapped folio as lazyfree in unmap_huge_pmd_locked(): once the PMD has
been split, the next block[1] will catch the folio at the PTE level and
do the same thing (mark it swapbacked and remap it).
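
Concretely, something like this is what I have in mind for
__discard_anon_folio_pmd_locked() (untested sketch, not an actual
patch):

	/*
	 * On redirty or unexpected references, just remap and bail out
	 * without folio_set_swapbacked(); after the PMD split, the
	 * per-PTE MADV_FREE check quoted below will mark the folio
	 * swapbacked and remap it.
	 */
	if (folio_test_dirty(folio) || pmd_dirty(orig_pmd) ||
	    ref_count != map_count + 1) {
		set_pmd_at(mm, addr, pmdp, orig_pmd);
		return false;
	}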

Hi David and Baolin, what do you think?

>                                 goto walk_done_err;
>                         }

[1] https://elixir.bootlin.com/linux/v6.10-rc3/source/mm/rmap.c#L1820

/* MADV_FREE page check */
if (!folio_test_swapbacked(folio)) {
	int ref_count, map_count;

[...]
	/*
	 * The only page refs must be one from isolation
	 * plus the rmap(s) (dropped by discard:).
	 */
	if (ref_count == 1 + map_count &&
	    !folio_test_dirty(folio)) {
		dec_mm_counter(mm, MM_ANONPAGES);
		goto discard;
	}

	/*
	 * If the folio was redirtied, it cannot be
	 * discarded. Remap the page to page table.
	 */
	set_pte_at(mm, address, pvmw.pte, pteval);
	folio_set_swapbacked(folio);
	goto walk_done_err;
}


Thanks,
Lance

>
> --
> 2.33.1
>
