Date:   Tue, 27 Jun 2023 21:23:40 +0800
From:   Liam Ni <zhiguangni01@...il.com>
To:     rppt@...nel.org
Cc:     dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
        tglx@...utronix.de, mingo@...hat.com, bp@...en8.de, x86@...nel.org,
        hpa@...or.com, akpm@...ux-foundation.org,
        linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [PATCH V2] x86,NUMA: improve the execution efficiency of numa_meminfo_cover_memory()

friendly ping

On Mon, 19 Jun 2023 at 15:53, Liam Ni <zhiguangni01@...il.com> wrote:
>
> Count the pages in memblock that have no node assigned. Such pages
> are not covered by numa_meminfo, so their total is the number of
> lost pages, and numa_meminfo_cover_memory() no longer needs to walk
> every meminfo block to compute it.
>
> Signed-off-by: Liam Ni <zhiguangni01@...il.com>
> ---
>  arch/x86/mm/numa.c | 25 ++++++-------------------
>  include/linux/mm.h |  2 ++
>  mm/mm_init.c       | 31 +++++++++++++++++++++++++++++++
>  3 files changed, 39 insertions(+), 19 deletions(-)
>
> diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
> index 2aadb2019b4f..ffe3b771eac7 100644
> --- a/arch/x86/mm/numa.c
> +++ b/arch/x86/mm/numa.c
> @@ -451,28 +451,15 @@ EXPORT_SYMBOL(__node_distance);
>   * Sanity check to catch more bad NUMA configurations (they are amazingly
>   * common).  Make sure the nodes cover all memory.
>   */
> -static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
> +static bool __init numa_meminfo_cover_memory(void)
>  {
> -       u64 numaram, e820ram;
> -       int i;
> -
> -       numaram = 0;
> -       for (i = 0; i < mi->nr_blks; i++) {
> -               u64 s = mi->blk[i].start >> PAGE_SHIFT;
> -               u64 e = mi->blk[i].end >> PAGE_SHIFT;
> -               numaram += e - s;
> -               numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
> -               if ((s64)numaram < 0)
> -                       numaram = 0;
> -       }
> +       u64 lost_pages;
>
> -       e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
> +       lost_pages = without_node_pages_in_range(0, max_pfn);
>
>         /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
> -       if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
> -               printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
> -                      (numaram << PAGE_SHIFT) >> 20,
> -                      (e820ram << PAGE_SHIFT) >> 20);
> +       if (lost_pages >= (1 << (20 - PAGE_SHIFT))) {
> +               printk(KERN_ERR "NUMA: %llu pages have no node assigned. Not used.\n", lost_pages);
>                 return false;
>         }
>         return true;
> @@ -583,7 +570,7 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
>                         return -EINVAL;
>                 }
>         }
> -       if (!numa_meminfo_cover_memory(mi))
> +       if (!numa_meminfo_cover_memory())
>                 return -EINVAL;
>
>         /* Finally register nodes. */
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 0daef3f2f029..99b7b49f1378 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -3043,6 +3043,8 @@ unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
>                                                 unsigned long end_pfn);
>  extern unsigned long absent_pages_in_range(unsigned long start_pfn,
>                                                 unsigned long end_pfn);
> +extern unsigned long without_node_pages_in_range(unsigned long start_pfn,
> +                                               unsigned long end_pfn);
>  extern void get_pfn_range_for_nid(unsigned int nid,
>                         unsigned long *start_pfn, unsigned long *end_pfn);
>
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index 3ddd18a89b66..1d5085a8f93b 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -1132,6 +1132,37 @@ static void __init adjust_zone_range_for_zone_movable(int nid,
>         }
>  }
>
> +/**
> + * without_node_pages_in_range - count pages without a node in a PFN range
> + * @start_pfn: The start PFN of the range to search
> + * @end_pfn: The end PFN of the range to search
> + *
> + * Return: the number of page frames without a node assigned in the range.
> + */
> +unsigned long __init without_node_pages_in_range(unsigned long start_pfn,
> +                                               unsigned long end_pfn)
> +{
> +       struct memblock_type *type = &memblock.memory;
> +       struct memblock_region *r;
> +       unsigned long num_pages = 0;
> +       unsigned long s, e;
> +       int i, nid;
> +
> +       for (i = 0; i < type->cnt; i++) {
> +               r = &type->regions[i];
> +               nid = memblock_get_region_node(r);
> +
> +               /* Clamp the region to the requested PFN range. */
> +               s = max_t(unsigned long, PFN_UP(r->base), start_pfn);
> +               e = min_t(unsigned long, PFN_DOWN(r->base + r->size), end_pfn);
> +               if (s >= e)
> +                       continue;
> +               if (nid == NUMA_NO_NODE)
> +                       num_pages += e - s;
> +       }
> +       return num_pages;
> +}
> +
>  /*
>   * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
>   * then all holes in the requested range will be accounted for.
> --
> 2.25.1
>
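For anyone following along, here is a minimal userspace sketch of the
counting logic the patch introduces. The region table, the NO_NODE
constant, and the PFN helpers below are simplified stand-ins for the
kernel's memblock types and macros, not the real API; the sketch only
illustrates the clamp-and-count idea under those assumptions:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                       /* 4 KiB pages, as on x86 */
#define PFN_UP(x)   (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define NO_NODE (-1)                        /* stand-in for NUMA_NO_NODE */

struct region { uint64_t base, size; int nid; };  /* mock memblock region */

/* Count page frames with no node assigned in [start_pfn, end_pfn). */
static unsigned long without_node_pages(const struct region *r, int cnt,
					unsigned long start_pfn,
					unsigned long end_pfn)
{
	unsigned long num_pages = 0;

	for (int i = 0; i < cnt; i++) {
		unsigned long s = PFN_UP(r[i].base);
		unsigned long e = PFN_DOWN(r[i].base + r[i].size);

		if (s < start_pfn)          /* clamp to the requested window */
			s = start_pfn;
		if (e > end_pfn)
			e = end_pfn;
		if (s >= e)
			continue;
		if (r[i].nid == NO_NODE)
			num_pages += e - s;
	}
	return num_pages;
}

int main(void)
{
	/* 16 MiB on node 0, then 2 MiB with no node assigned. */
	struct region mock[] = {
		{ 0x0000000, 0x1000000, 0 },
		{ 0x1000000, 0x0200000, NO_NODE },
	};
	unsigned long lost = without_node_pages(mock, 2, 0, ~0UL);

	printf("lost %lu frames, slack %lu frames\n",
	       lost, 1UL << (20 - PAGE_SHIFT));
	return 0;
}

With 4 KiB pages the slack check in the patch, 1 << (20 - PAGE_SHIFT),
works out to 256 page frames (1 MiB); the mock table above crosses that
threshold with 512 unassigned frames, so the corresponding kernel check
would report the NUMA configuration as bad.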
