Message-ID: <CAFgQCTuhW6sPtCNFmnz13p30v3owE3Rty5WJNgtqgz8XaZT-aQ@mail.gmail.com>
Date:   Thu, 13 Dec 2018 16:37:35 +0800
From:   Pingfan Liu <kernelfans@...il.com>
To:     mhocko@...nel.org
Cc:     Vlastimil Babka <vbabka@...e.cz>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org,
        Andrew Morton <akpm@...ux-foundation.org>,
        Mike Rapoport <rppt@...ux.vnet.ibm.com>,
        Bjorn Helgaas <bhelgaas@...gle.com>,
        Jonathan Cameron <Jonathan.Cameron@...wei.com>
Subject: Re: [PATCH] mm/alloc: fallback to first node if the wanted node offline

On Wed, Dec 12, 2018 at 7:53 PM Michal Hocko <mhocko@...nel.org> wrote:
>
> On Wed 12-12-18 16:31:35, Pingfan Liu wrote:
> > On Mon, Dec 10, 2018 at 8:37 PM Michal Hocko <mhocko@...nel.org> wrote:
> > >
> > [...]
> > >
> > > In other words, does the following work? I am sorry to wild-guess this
> > > way, but I am not able to recreate your setup to play with this myself.
> > >
> > > diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
> > > index 1308f5408bf7..d51643e10d00 100644
> > > --- a/arch/x86/mm/numa.c
> > > +++ b/arch/x86/mm/numa.c
> > > @@ -216,8 +216,6 @@ static void __init alloc_node_data(int nid)
> > >
> > >         node_data[nid] = nd;
> > >         memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
> > > -
> > > -       node_set_online(nid);
> > >  }
> > >
> > >  /**
> > > @@ -527,6 +525,19 @@ static void __init numa_clear_kernel_node_hotplug(void)
> > >         }
> > >  }
> > >
> > > +static void __init init_memory_less_node(int nid)
> > > +{
> > > +       unsigned long zones_size[MAX_NR_ZONES] = {0};
> > > +       unsigned long zholes_size[MAX_NR_ZONES] = {0};
> > > +
> > > +       free_area_init_node(nid, zones_size, 0, zholes_size);
> > > +
> > > +       /*
> > > +        * All zonelists will be built later in start_kernel() after per cpu
> > > +        * areas are initialized.
> > > +        */
> > > +}
> > > +
> > >  static int __init numa_register_memblks(struct numa_meminfo *mi)
> > >  {
> > >         unsigned long uninitialized_var(pfn_align);
> > > @@ -570,7 +581,7 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
> > >                 return -EINVAL;
> > >
> > >         /* Finally register nodes. */
> > > -       for_each_node_mask(nid, node_possible_map) {
> > > +       for_each_node(nid) {
> > >                 u64 start = PFN_PHYS(max_pfn);
> > >                 u64 end = 0;
> > >
> > > @@ -592,6 +603,10 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
> > >                         continue;
> > >
> > >                 alloc_node_data(nid);
> > > +               if (!end)
> >
> > Here is the bug: this !end case can never be reached here.
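(For reference, a rough sketch of the per-node loop as the first patch leaves it, reconstructed from the hunks quoted above rather than from the actual tree; the elided checks are paraphrased, and the init_memory_less_node() call under "if (!end)" is inferred from the revised patch below.)

    for_each_node(nid) {
            u64 start = PFN_PHYS(max_pfn);
            u64 end = 0;

            /* ... accumulate start/end over this nid's mi->blk[] entries ... */

            if (start >= end)       /* memory-less node: end stays 0, start stays PFN_PHYS(max_pfn) */
                    continue;       /* always taken for such a node */

            /* ... minimum-size check (unchanged context) ... */

            alloc_node_data(nid);
            if (!end)               /* never true here: end == 0 already bailed out above */
                    init_memory_less_node(nid);
    }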
>
> You are right. I am dumb. I've just completely missed that. Sigh.
> Anyway, I think the code is more complicated than necessary and we can
> simply drop the check. I do not think we really have to worry about
> start exceeding end. So the final patch should look as follows.
> Btw. I believe it is better to pull alloc_node_data out of init_memory_less_node,
> because there is no need to duplicate the call and, moreover, we want to
> pull node_set_online out as well. The code also seems cleaner this way.
>
I have no strong opinion here.
> Thanks for your testing and your patience with me here.
Np.
>
> diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
> index 1308f5408bf7..a5548fe668fb 100644
> --- a/arch/x86/mm/numa.c
> +++ b/arch/x86/mm/numa.c
> @@ -216,8 +216,6 @@ static void __init alloc_node_data(int nid)
>
>         node_data[nid] = nd;
>         memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
> -
> -       node_set_online(nid);
>  }
>
>  /**
> @@ -527,6 +525,19 @@ static void __init numa_clear_kernel_node_hotplug(void)
>         }
>  }
>
> +static void __init init_memory_less_node(int nid)
> +{
> +       unsigned long zones_size[MAX_NR_ZONES] = {0};
> +       unsigned long zholes_size[MAX_NR_ZONES] = {0};
> +
> +       free_area_init_node(nid, zones_size, 0, zholes_size);
> +
> +       /*
> +        * All zonelists will be built later in start_kernel() after per cpu
> +        * areas are initialized.
> +        */
> +}
> +
>  static int __init numa_register_memblks(struct numa_meminfo *mi)
>  {
>         unsigned long uninitialized_var(pfn_align);
> @@ -570,7 +581,7 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
>                 return -EINVAL;
>
>         /* Finally register nodes. */
> -       for_each_node_mask(nid, node_possible_map) {
> +       for_each_node(nid) {
>                 u64 start = PFN_PHYS(max_pfn);
>                 u64 end = 0;
>
> @@ -581,9 +592,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
>                         end = max(mi->blk[i].end, end);
>                 }
>
> -               if (start >= end)
> -                       continue;
> -
>                 /*
>                  * Don't confuse VM with a node that doesn't have the
>                  * minimum amount of memory:
> @@ -592,6 +600,10 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
>                         continue;
>
>                 alloc_node_data(nid);
> +               if (!end)
> +                       init_memory_less_node(nid);
> +               else
> +                       node_set_online(nid);
>         }
>
>         /* Dump memblock with node info and return. */
> @@ -721,21 +733,6 @@ void __init x86_numa_init(void)
>         numa_init(dummy_numa_init);
>  }
>
> -static void __init init_memory_less_node(int nid)
> -{
> -       unsigned long zones_size[MAX_NR_ZONES] = {0};
> -       unsigned long zholes_size[MAX_NR_ZONES] = {0};
> -
> -       /* Allocate and initialize node data. Memory-less node is now online.*/
> -       alloc_node_data(nid);
> -       free_area_init_node(nid, zones_size, 0, zholes_size);
> -
> -       /*
> -        * All zonelists will be built later in start_kernel() after per cpu
> -        * areas are initialized.
> -        */
> -}
> -
>  /*
>   * Setup early cpu_to_node.
>   *
> @@ -763,9 +760,6 @@ void __init init_cpu_to_node(void)
>                 if (node == NUMA_NO_NODE)
>                         continue;
>
> -               if (!node_online(node))
> -                       init_memory_less_node(node);
> -
>                 numa_set_node(cpu, node);
>         }
>  }
> --
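(For readability, a rough reconstruction of the same loop with the revised patch applied; assembled from the quoted hunks, not compiled, with the unchanged context checks paraphrased.)

    for_each_node(nid) {
            u64 start = PFN_PHYS(max_pfn);
            u64 end = 0;

            /* ... accumulate start/end over this nid's mi->blk[] entries ... */

            /* the "start >= end" bail-out is gone, so memory-less nodes fall through */

            /* ... minimum-size check (unchanged context; it only filters nodes that do have memory) ... */

            alloc_node_data(nid);           /* no longer calls node_set_online() */
            if (!end)
                    init_memory_less_node(nid);     /* free_area_init_node() only */
            else
                    node_set_online(nid);
    }

In this shape, nodes with memory are marked online in the loop itself, memory-less nodes only get free_area_init_node(), and both the node_set_online() call inside alloc_node_data() and the init_memory_less_node() call in init_cpu_to_node() are removed.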
Unfortunately, it still has a bug, and I got a panic. Log attached.

Thanks,
Pingfan

View attachment "1213.txt" of type "text/plain" (22233 bytes)
