Date:   Wed, 10 Jan 2018 16:40:38 +0100
From:   Dmitry Vyukov <dvyukov@...gle.com>
To:     Andrey Ryabinin <aryabinin@...tuozzo.com>
Cc:     Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>,
        "H. Peter Anvin" <hpa@...or.com>,
        "the arch/x86 maintainers" <x86@...nel.org>,
        kasan-dev <kasan-dev@...glegroups.com>,
        LKML <linux-kernel@...r.kernel.org>,
        Alexander Potapenko <glider@...gle.com>,
        kernel test robot <xiaolong.ye@...el.com>, LKP <lkp@...org>
Subject: Re: [PATCH] x86/kasan: panic if there is not enough memory to boot.

On Wed, Jan 10, 2018 at 4:36 PM, Andrey Ryabinin
<aryabinin@...tuozzo.com> wrote:
> Currently KASAN doesn't panic when it doesn't have enough memory to
> boot. Instead, it crashes in some random place:
>
>  kernel BUG at arch/x86/mm/physaddr.c:27!
>
>  RIP: 0010:__phys_addr+0x268/0x276
>  Call Trace:
>   kasan_populate_shadow+0x3f2/0x497
>   kasan_init+0x12e/0x2b2
>   setup_arch+0x2825/0x2a2c
>   start_kernel+0xc8/0x15f4
>   x86_64_start_reservations+0x2a/0x2c
>   x86_64_start_kernel+0x72/0x75
>   secondary_startup_64+0xa5/0xb0
>
> Use memblock_virt_alloc_try_nid() for allocations that have no
> failure fallback; it panics with an out-of-memory message.
>
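> A minimal userspace sketch of the resulting panic/nopanic split, for
> illustration only (malloc() and exit() stand in for the memblock
> allocator and panic(); this is not kernel code and not part of the
> diff below):
>
>	#include <stdbool.h>
>	#include <stdlib.h>
>
>	static void *early_alloc(size_t size, int nid, bool panic)
>	{
>		void *p = malloc(size);		/* memblock stand-in */
>
>		(void)nid;		/* NUMA hint unused in this model */
>		if (!p && panic)
>			exit(1);	/* panic() stand-in: fail loudly */
>		return p;	/* may be NULL when panic == false */
>	}
>
> Opportunistic huge-page mappings pass panic == false and fall back to
> base pages on failure; mandatory base-page mappings pass panic == true.
>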
> Reported-by: kernel test robot <xiaolong.ye@...el.com>
> Signed-off-by: Andrey Ryabinin <aryabinin@...tuozzo.com>
> ---
>  arch/x86/mm/kasan_init_64.c | 24 ++++++++++++++----------
>  1 file changed, 14 insertions(+), 10 deletions(-)
>
> diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
> index 47388f0c0e59..af6f2f9c6a26 100644
> --- a/arch/x86/mm/kasan_init_64.c
> +++ b/arch/x86/mm/kasan_init_64.c
> @@ -21,10 +21,14 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
>
>  static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
>
> -static __init void *early_alloc(size_t size, int nid)
> +static __init void *early_alloc(size_t size, int nid, bool panic)
>  {
> -       return memblock_virt_alloc_try_nid_nopanic(size, size,
> -               __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
> +       if (panic)
> +               return memblock_virt_alloc_try_nid(size, size,
> +                       __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
> +       else
> +               return memblock_virt_alloc_try_nid_nopanic(size, size,
> +                       __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
>  }
>
>  static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
> @@ -38,14 +42,14 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
>                 if (boot_cpu_has(X86_FEATURE_PSE) &&
>                     ((end - addr) == PMD_SIZE) &&
>                     IS_ALIGNED(addr, PMD_SIZE)) {
> -                       p = early_alloc(PMD_SIZE, nid);
> +                       p = early_alloc(PMD_SIZE, nid, false);
>                         if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
>                                 return;
>                         else if (p)
>                                 memblock_free(__pa(p), PMD_SIZE);
>                 }
>
> -               p = early_alloc(PAGE_SIZE, nid);
> +               p = early_alloc(PAGE_SIZE, nid, true);
>                 pmd_populate_kernel(&init_mm, pmd, p);
>         }
>
> @@ -57,7 +61,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
>                 if (!pte_none(*pte))
>                         continue;
>
> -               p = early_alloc(PAGE_SIZE, nid);
> +               p = early_alloc(PAGE_SIZE, nid, true);
>                 entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
>                 set_pte_at(&init_mm, addr, pte, entry);
>         } while (pte++, addr += PAGE_SIZE, addr != end);
> @@ -75,14 +79,14 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
>                 if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
>                     ((end - addr) == PUD_SIZE) &&
>                     IS_ALIGNED(addr, PUD_SIZE)) {
> -                       p = early_alloc(PUD_SIZE, nid);
> +                       p = early_alloc(PUD_SIZE, nid, false);
>                         if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
>                                 return;
>                         else if (p)
>                                 memblock_free(__pa(p), PUD_SIZE);
>                 }
>
> -               p = early_alloc(PAGE_SIZE, nid);
> +               p = early_alloc(PAGE_SIZE, nid, true);
>                 pud_populate(&init_mm, pud, p);
>         }
>
> @@ -101,7 +105,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
>         unsigned long next;
>
>         if (p4d_none(*p4d)) {
> -               void *p = early_alloc(PAGE_SIZE, nid);
> +               void *p = early_alloc(PAGE_SIZE, nid, true);
>
>                 p4d_populate(&init_mm, p4d, p);
>         }
> @@ -122,7 +126,7 @@ static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
>         unsigned long next;
>
>         if (pgd_none(*pgd)) {
> -               p = early_alloc(PAGE_SIZE, nid);
> +               p = early_alloc(PAGE_SIZE, nid, true);
>                 pgd_populate(&init_mm, pgd, p);
>         }

Acked-by: Dmitry Vyukov <dvyukov@...gle.com>

Thanks!
