lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAHbLzkpb2Bs8buDOAGCt7hpjy2824HfK3RsTHM+gbzmZ1wvKRA@mail.gmail.com>
Date:   Thu, 3 Feb 2022 14:18:56 -0800
From:   Yang Shi <shy828301@...il.com>
To:     Andrew Morton <akpm@...ux-foundation.org>
Cc:     "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
        Jann Horn <jannh@...gle.com>,
        Matthew Wilcox <willy@...radead.org>,
        David Hildenbrand <david@...hat.com>,
        Linux MM <linux-mm@...ck.org>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        stable <stable@...r.kernel.org>
Subject: Re: [v4 PATCH] fs/proc: task_mmu.c: don't read mapcount for migration entry

On Thu, Feb 3, 2022 at 2:12 PM Andrew Morton <akpm@...ux-foundation.org> wrote:
>
> On Thu,  3 Feb 2022 10:26:41 -0800 Yang Shi <shy828301@...il.com> wrote:
>
> > v4: * s/Treated/Treat per David
> >     * Collected acked-by tag from David
> > v3: * Fixed the fix tag, the one used by v2 was not accurate
> >     * Added comment about the risk calling page_mapcount() per David
> >     * Fix pagemap
> > v2: * Added proper fix tag per Jann Horn
> >     * Rebased to the latest Linus's tree
>
> The v2->v4 delta shows changes which aren't described above?

They are.

v4: * s/Treated/Treat per David
    * Collected acked-by tag from David
v3: * Fixed the fix tag, the one used by v2 was not accurate
    * Added comment about the risk of calling page_mapcount() per David
    * Fixed pagemap

>
> --- a/fs/proc/task_mmu.c~fs-proc-task_mmuc-dont-read-mapcount-for-migration-entry-v4
> +++ a/fs/proc/task_mmu.c
> @@ -469,9 +469,12 @@ static void smaps_account(struct mem_siz
>          * If any subpage of the compound page mapped with PTE it would elevate
>          * page_count().
>          *
> -        * Treated regular migration entries as mapcount == 1 without reading
> -        * mapcount since calling page_mapcount() for migration entries is
> -        * racy against THP splitting.
> +        * The page_mapcount() is called to get a snapshot of the mapcount.
> +        * Without holding the page lock this snapshot can be slightly wrong as
> +        * we cannot always read the mapcount atomically.  It is not safe to
> +        * call page_mapcount() even with PTL held if the page is not mapped,
> +        * especially for migration entries.  Treat regular migration entries
> +        * as mapcount == 1.
>          */
>         if ((page_count(page) == 1) || migration) {
>                 smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
> @@ -1393,6 +1396,7 @@ static pagemap_entry_t pte_to_pagemap_en
>  {
>         u64 frame = 0, flags = 0;
>         struct page *page = NULL;
> +       bool migration = false;
>
>         if (pte_present(pte)) {
>                 if (pm->show_pfn)
> @@ -1414,13 +1418,14 @@ static pagemap_entry_t pte_to_pagemap_en
>                         frame = swp_type(entry) |
>                                 (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
>                 flags |= PM_SWAP;
> +               migration = is_migration_entry(entry);
>                 if (is_pfn_swap_entry(entry))
>                         page = pfn_swap_entry_to_page(entry);
>         }
>
>         if (page && !PageAnon(page))
>                 flags |= PM_FILE;
> -       if (page && page_mapcount(page) == 1)
> +       if (page && !migration && page_mapcount(page) == 1)
>                 flags |= PM_MMAP_EXCLUSIVE;
>         if (vma->vm_flags & VM_SOFTDIRTY)
>                 flags |= PM_SOFT_DIRTY;
> @@ -1436,6 +1441,7 @@ static int pagemap_pmd_range(pmd_t *pmdp
>         spinlock_t *ptl;
>         pte_t *pte, *orig_pte;
>         int err = 0;
> +       bool migration = false;
>
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>         ptl = pmd_trans_huge_lock(pmdp, vma);
> @@ -1476,11 +1482,12 @@ static int pagemap_pmd_range(pmd_t *pmdp
>                         if (pmd_swp_uffd_wp(pmd))
>                                 flags |= PM_UFFD_WP;
>                         VM_BUG_ON(!is_pmd_migration_entry(pmd));
> +                       migration = is_migration_entry(entry);
>                         page = pfn_swap_entry_to_page(entry);
>                 }
>  #endif
>
> -               if (page && page_mapcount(page) == 1)
> +               if (page && !migration && page_mapcount(page) == 1)
>                         flags |= PM_MMAP_EXCLUSIVE;
>
>                 for (; addr != end; addr += PAGE_SIZE) {
> _
>

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ