Message-ID: <CAHbLzkqFba6c7=PM39sPqBdZyc+Yqp=9v2SxdwWM-4Z946ZsRw@mail.gmail.com>
Date:   Wed, 9 Jun 2021 09:56:55 -0700
From:   Yang Shi <shy828301@...il.com>
To:     Hugh Dickins <hughd@...gle.com>
Cc:     Andrew Morton <akpm@...ux-foundation.org>,
        "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
        Wang Yugui <wangyugui@...-tech.com>,
        Matthew Wilcox <willy@...radead.org>,
        Naoya Horiguchi <naoya.horiguchi@....com>,
        Alistair Popple <apopple@...dia.com>,
        Ralph Campbell <rcampbell@...dia.com>, Zi Yan <ziy@...dia.com>,
        Miaohe Lin <linmiaohe@...wei.com>,
        Minchan Kim <minchan@...nel.org>, Jue Wang <juew@...gle.com>,
        Peter Xu <peterx@...hat.com>, Jan Kara <jack@...e.cz>,
        Shakeel Butt <shakeelb@...gle.com>,
        Oscar Salvador <osalvador@...e.de>,
        Linux MM <linux-mm@...ck.org>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v2 02/10] mm/thp: make is_huge_zero_pmd() safe and quicker

On Tue, Jun 8, 2021 at 9:08 PM Hugh Dickins <hughd@...gle.com> wrote:
>
> Most callers of is_huge_zero_pmd() supply a pmd already verified present;
> but a few (notably zap_huge_pmd()) do not - the pmd might be a migration
> entry, in which case the pfn is encoded differently from a present pmd.
> Such an entry might pass the is_huge_zero_pmd() test (though not on x86,
> since L1TF forced us to protect against that), or perhaps even crash in
> pmd_page() applied to a swap-like entry.
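
An aside for readers following along: the hazard is that a non-present
entry reuses the pfn bits for other state, so decoding it as though it
were present can alias an unrelated pfn. Below is a minimal userspace
model of that, not kernel code - PRESENT_BIT, SWP_TYPE_BITS and the bit
layout are invented for illustration; real encodings are
per-architecture:

/* Userspace model of the hazard - all encodings here are made up. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PRESENT_BIT   0x1UL
#define SWP_TYPE_BITS 5	/* hypothetical swap-type field width */

/* A present entry stores the pfn just above the present bit... */
static uint64_t mk_present(uint64_t pfn)   { return (pfn << 1) | PRESENT_BIT; }
/* ...a migration entry stores it above the present and type bits. */
static uint64_t mk_migration(uint64_t pfn) { return pfn << (1 + SWP_TYPE_BITS); }

static bool     entry_present(uint64_t e)  { return e & PRESENT_BIT; }
static uint64_t entry_pfn(uint64_t e)      { return e >> 1; } /* present only! */

int main(void)
{
	const uint64_t zero_pfn = 0x1200;
	uint64_t pres = mk_present(zero_pfn);
	uint64_t migr = mk_migration(0x90);	/* some unrelated page's pfn */

	/* Present entry: decoding is valid, match is genuine. */
	printf("present: %d\n", entry_present(pres) && entry_pfn(pres) == zero_pfn);

	/* Unsafe: decodes the migration entry as if present; its bits
	 * happen to alias the zero page's pfn, a false positive. */
	printf("naive:   %d\n", entry_pfn(migr) == zero_pfn);

	/* Safe: reject non-present entries before decoding the pfn. */
	printf("guarded: %d\n", entry_present(migr) && entry_pfn(migr) == zero_pfn);
	return 0;
}

With the guard in place, a non-present entry can never masquerade as the
huge zero page, whatever its pfn bits happen to decode to.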
>
> Make it safe by adding a pmd_present() check into is_huge_zero_pmd()
> itself; and make it quicker by caching huge_zero_pfn, so that
> is_huge_zero_pmd() need not do that pmd_page() lookup each time.
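
For the "quicker" half, the pattern is a single torn-free load of a
cached pfn compared against the candidate, with ~0UL as a never-valid
sentinel while no zero page exists. A sketch in portable C11 - relaxed
atomics standing in for the kernel's READ_ONCE/WRITE_ONCE, and all names
invented for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define INVALID_PFN (~0UL)	/* no pfn is ever ~0UL, so it means "unset" */

static _Atomic unsigned long cached_zero_pfn = INVALID_PFN;

/* Publisher side: set on allocation, reset to the sentinel on free. */
static void publish_zero_pfn(unsigned long pfn)
{
	atomic_store_explicit(&cached_zero_pfn, pfn, memory_order_relaxed);
}

static void retract_zero_pfn(void)
{
	atomic_store_explicit(&cached_zero_pfn, INVALID_PFN, memory_order_relaxed);
}

/* Reader side: one load and one compare - no page lookup needed. */
static bool pfn_is_cached_zero(unsigned long pfn)
{
	return atomic_load_explicit(&cached_zero_pfn,
				    memory_order_relaxed) == pfn;
}

int main(void)
{
	printf("%d\n", pfn_is_cached_zero(42));	/* 0: sentinel in place */
	publish_zero_pfn(42);
	printf("%d\n", pfn_is_cached_zero(42));	/* 1: cached and matched */
	retract_zero_pfn();
	printf("%d\n", pfn_is_cached_zero(42));	/* 0 again after free */
	return 0;
}

A reader racing the shrinker can at worst get a stale answer for a page
that is going away anyway; the sentinel ensures the comparison never
matches a live pfn by accident.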
>
> __split_huge_pmd_locked() used to check pmd_trans_huge() first: that
> worked, but is unnecessary now that is_huge_zero_pmd() checks
> pmd_present() itself.
>
> Fixes: e71769ae5260 ("mm: enable thp migration for shmem thp")
> Signed-off-by: Hugh Dickins <hughd@...gle.com>
> Cc: <stable@...r.kernel.org>

Reviewed-by: Yang Shi <shy828301@...il.com>

> ---
> Patch added (replacing part of first) since the v1 series was posted.
>
>  include/linux/huge_mm.h | 8 +++++++-
>  mm/huge_memory.c        | 5 ++++-
>  2 files changed, 11 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 9626fda5efce..2a8ebe6c222e 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -286,6 +286,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
>  vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
>
>  extern struct page *huge_zero_page;
> +extern unsigned long huge_zero_pfn;
>
>  static inline bool is_huge_zero_page(struct page *page)
>  {
> @@ -294,7 +295,7 @@ static inline bool is_huge_zero_page(struct page *page)
>
>  static inline bool is_huge_zero_pmd(pmd_t pmd)
>  {
> -       return is_huge_zero_page(pmd_page(pmd));
> +       return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
>  }
>
>  static inline bool is_huge_zero_pud(pud_t pud)
> @@ -440,6 +441,11 @@ static inline bool is_huge_zero_page(struct page *page)
>         return false;
>  }
>
> +static inline bool is_huge_zero_pmd(pmd_t pmd)
> +{
> +       return false;
> +}
> +
>  static inline bool is_huge_zero_pud(pud_t pud)
>  {
>         return false;
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 42cfefc6e66e..5885c5f5836f 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -62,6 +62,7 @@ static struct shrinker deferred_split_shrinker;
>
>  static atomic_t huge_zero_refcount;
>  struct page *huge_zero_page __read_mostly;
> +unsigned long huge_zero_pfn __read_mostly = ~0UL;
>
>  bool transparent_hugepage_enabled(struct vm_area_struct *vma)
>  {
> @@ -98,6 +99,7 @@ static bool get_huge_zero_page(void)
>                 __free_pages(zero_page, compound_order(zero_page));
>                 goto retry;
>         }
> +       WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
>
>         /* We take additional reference here. It will be put back by shrinker */
>         atomic_set(&huge_zero_refcount, 2);
> @@ -147,6 +149,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
>         if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
>                 struct page *zero_page = xchg(&huge_zero_page, NULL);
>                 BUG_ON(zero_page == NULL);
> +               WRITE_ONCE(huge_zero_pfn, ~0UL);
>                 __free_pages(zero_page, compound_order(zero_page));
>                 return HPAGE_PMD_NR;
>         }
> @@ -2071,7 +2074,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
>                 return;
>         }
>
> -       if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
> +       if (is_huge_zero_pmd(*pmd)) {
>                 /*
>                  * FIXME: Do we want to invalidate secondary mmu by calling
>                  * mmu_notifier_invalidate_range() see comments below inside
> --
> 2.26.2
>
