Date:   Wed, 13 Oct 2021 14:46:00 -0700
From:   Shakeel Butt <shakeelb@...gle.com>
To:     Chen Wandun <chenwandun@...wei.com>
Cc:     Andrew Morton <akpm@...ux-foundation.org>,
        Nicholas Piggin <npiggin@...il.com>,
        Linux MM <linux-mm@...ck.org>,
        LKML <linux-kernel@...r.kernel.org>,
        Eric Dumazet <edumazet@...gle.com>,
        Kefeng Wang <wangkefeng.wang@...wei.com>, guohanjun@...wei.com
Subject: Re: [PATCH] mm/vmalloc: fix numa spreading for large hash tables

On Tue, Sep 28, 2021 at 5:03 AM Chen Wandun <chenwandun@...wei.com> wrote:
>
> Eric Dumazet reported strange NUMA spreading behaviour in [1], and found
> that commit 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings")
> introduced this issue [2].
>
> Digging into the difference before and after that commit, the page
> allocation paths differ:
>
> before:
> alloc_large_system_hash
>     __vmalloc
>         __vmalloc_node(..., NUMA_NO_NODE, ...)
>             __vmalloc_node_range
>                 __vmalloc_area_node
>                     alloc_page /* NUMA_NO_NODE, so the alloc_page() branch is taken */
>                         alloc_pages_current
>                             alloc_page_interleave /* confirmed by printing the policy mode */
>
> after:
> alloc_large_system_hash
>     __vmalloc
>         __vmalloc_node(..., NUMA_NO_NODE, ...)
>             __vmalloc_node_range
>                 __vmalloc_area_node
>                     alloc_pages_node /* nid chosen by numa_mem_id() */
>                         __alloc_pages_node(nid, ....)
>
> So after commit 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings"),
> memory is allocated on the current node instead of being interleaved
> across nodes.
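
Roughly, the difference comes down to which allocator entry point ends up
being used for NUMA_NO_NODE. A simplified sketch of the two paths (not the
exact code, just the tail of the call chains quoted above):

	/* before 121e6f3258fe: mempolicy-aware path */
	page = alloc_page(gfp);		/* -> alloc_pages_current(), which honors
					 * the task mempolicy and can interleave */

	/* after 121e6f3258fe: the node is resolved up front */
	page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);	/* nid falls back to
							 * numa_mem_id(), so pages
							 * stay on the local node */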
>
> [1]
> https://lore.kernel.org/linux-mm/CANn89iL6AAyWhfxdHO+jaT075iOa3XcYn9k6JJc7JR2XYn6k_Q@mail.gmail.com/
>
> [2]
> https://lore.kernel.org/linux-mm/CANn89iLofTR=AK-QOZY87RdUZENCZUT4O6a0hvhu3_EwRMerOg@mail.gmail.com/
>
> Fixes: 121e6f3258fe ("mm/vmalloc: hugepage vmalloc mappings")
> Reported-by: Eric Dumazet <edumazet@...gle.com>
> Signed-off-by: Chen Wandun <chenwandun@...wei.com>
> ---
>  mm/vmalloc.c | 33 ++++++++++++++++++++++++++-------
>  1 file changed, 26 insertions(+), 7 deletions(-)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index f884706c5280..48e717626e94 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2823,6 +2823,8 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
>                 unsigned int order, unsigned int nr_pages, struct page **pages)
>  {
>         unsigned int nr_allocated = 0;
> +       struct page *page;
> +       int i;
>
>         /*
>          * For order-0 pages we make use of bulk allocator, if
> @@ -2833,6 +2835,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
>         if (!order) {

Can you please replace the above with if (!order && nid != NUMA_NO_NODE)?
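
I.e. something like the below (just a sketch of the guard, the loop body
stays as is), so that order-0 NUMA_NO_NODE requests skip the bulk allocator
and fall through to the single-page path further down, where alloc_pages()
is used and the task mempolicy still applies:

	if (!order && nid != NUMA_NO_NODE) {
		/* bulk allocation only when an explicit node was requested */
		...
	}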

>                 while (nr_allocated < nr_pages) {
>                         unsigned int nr, nr_pages_request;
> +                       page = NULL;
>
>                         /*
>                          * A maximum allowed request is hard-coded and is 100
> @@ -2842,9 +2845,23 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
>                          */
>                         nr_pages_request = min(100U, nr_pages - nr_allocated);
>

Undo the following change in this if block.

> -                       nr = alloc_pages_bulk_array_node(gfp, nid,
> -                               nr_pages_request, pages + nr_allocated);
> -
> +                       if (nid == NUMA_NO_NODE) {
> +                               for (i = 0; i < nr_pages_request; i++) {
> +                                       page = alloc_page(gfp);
> +                                       if (page)
> +                                               pages[nr_allocated + i] = page;
> +                                       else {
> +                                               nr = i;
> +                                               break;
> +                                       }
> +                               }
> +                               if (i >= nr_pages_request)
> +                                       nr = nr_pages_request;
> +                       } else {
> +                               nr = alloc_pages_bulk_array_node(gfp, nid,
> +                                                       nr_pages_request,
> +                                                       pages + nr_allocated);
> +                       }
>                         nr_allocated += nr;
>                         cond_resched();
>
> @@ -2863,11 +2880,13 @@ vm_area_alloc_pages(gfp_t gfp, int nid,

Put the following line under "else if (order)".

>                 gfp |= __GFP_COMP;
>
>         /* High-order pages or fallback path if "bulk" fails. */
> -       while (nr_allocated < nr_pages) {

Keep the following declarations inside the while loop.

> -               struct page *page;
> -               int i;
>
> -               page = alloc_pages_node(nid, gfp, order);
> +       page = NULL;
> +       while (nr_allocated < nr_pages) {
> +               if (nid == NUMA_NO_NODE)
> +                       page = alloc_pages(gfp, order);
> +               else
> +                       page = alloc_pages_node(nid, gfp, order);
>                 if (unlikely(!page))
>                         break;
>
> --
> 2.25.1
>
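
For clarity, a rough, untested sketch of what vm_area_alloc_pages() could
look like with the above suggestions folded in (the tail of the function,
which copies the allocated page(s) into the pages array, is elided since it
is unchanged):

static inline unsigned int
vm_area_alloc_pages(gfp_t gfp, int nid,
		unsigned int order, unsigned int nr_pages, struct page **pages)
{
	unsigned int nr_allocated = 0;

	/*
	 * Use the bulk allocator only for order-0 requests with an explicit
	 * node. NUMA_NO_NODE falls through to alloc_pages() below, so the
	 * task mempolicy (e.g. interleave) still applies.
	 */
	if (!order && nid != NUMA_NO_NODE) {
		while (nr_allocated < nr_pages) {
			unsigned int nr, nr_pages_request;

			/*
			 * A maximum allowed request is hard-coded as 100 pages
			 * per call, to avoid long preemption-off sections in
			 * the bulk allocator.
			 */
			nr_pages_request = min(100U, nr_pages - nr_allocated);

			nr = alloc_pages_bulk_array_node(gfp, nid,
				nr_pages_request, pages + nr_allocated);

			nr_allocated += nr;
			cond_resched();

			/* Partial success: fall back to single page allocations. */
			if (nr != nr_pages_request)
				break;
		}
	} else if (order) {
		/*
		 * "else if (order)" instead of a bare "else", so that order-0
		 * NUMA_NO_NODE requests do not get __GFP_COMP.
		 */
		gfp |= __GFP_COMP;
	}

	/* High-order pages or fallback path if "bulk" fails. */
	while (nr_allocated < nr_pages) {
		struct page *page;
		int i;

		if (nid == NUMA_NO_NODE)
			page = alloc_pages(gfp, order);
		else
			page = alloc_pages_node(nid, gfp, order);
		if (unlikely(!page))
			break;

		/* ... unchanged: use i to copy the page(s) into pages[] ... */
	}

	return nr_allocated;
}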
