lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CANpmjNNuaYneLb3ScSwF=o0DnECBt4NRkBZJuwRqBrOKnTGPbA@mail.gmail.com>
Date:   Mon, 24 May 2021 12:04:18 +0200
From:   Marco Elver <elver@...gle.com>
To:     Jisheng Zhang <Jisheng.Zhang@...aptics.com>
Cc:     Catalin Marinas <catalin.marinas@....com>,
        Will Deacon <will@...nel.org>,
        Alexander Potapenko <glider@...gle.com>,
        Dmitry Vyukov <dvyukov@...gle.com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Linux ARM <linux-arm-kernel@...ts.infradead.org>,
        LKML <linux-kernel@...r.kernel.org>,
        kasan-dev <kasan-dev@...glegroups.com>,
        Linux Memory Management List <linux-mm@...ck.org>,
        Mark Rutland <mark.rutland@....com>
Subject: Re: [PATCH 2/2] arm64: remove page granularity limitation from KFENCE

+Cc Mark

On Mon, 24 May 2021 at 11:26, Jisheng Zhang <Jisheng.Zhang@...aptics.com> wrote:
>
> KFENCE requires linear map to be mapped at page granularity, so that
> it is possible to protect/unprotect single pages in the KFENCE pool.
> Currently if KFENCE is enabled, arm64 maps all pages at page
> granularity, which seems overkill. In fact, we only need to map the
> pages in the KFENCE pool itself at page granularity. We achieve this goal
> by allocating KFENCE pool before paging_init() so we know the KFENCE
> pool address, then we take care to map the pool at page granularity
> during map_mem().
>
> Signed-off-by: Jisheng Zhang <Jisheng.Zhang@...aptics.com>
> ---
>  arch/arm64/kernel/setup.c |  3 +++
>  arch/arm64/mm/mmu.c       | 27 +++++++++++++++++++--------
>  2 files changed, 22 insertions(+), 8 deletions(-)
>
> diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
> index 61845c0821d9..51c0d6e8b67b 100644
> --- a/arch/arm64/kernel/setup.c
> +++ b/arch/arm64/kernel/setup.c
> @@ -18,6 +18,7 @@
>  #include <linux/screen_info.h>
>  #include <linux/init.h>
>  #include <linux/kexec.h>
> +#include <linux/kfence.h>
>  #include <linux/root_dev.h>
>  #include <linux/cpu.h>
>  #include <linux/interrupt.h>
> @@ -345,6 +346,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
>
>         arm64_memblock_init();
>
> +       kfence_alloc_pool();
> +
>         paging_init();
>
>         acpi_table_upgrade();
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 89b66ef43a0f..12712d31a054 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -13,6 +13,7 @@
>  #include <linux/init.h>
>  #include <linux/ioport.h>
>  #include <linux/kexec.h>
> +#include <linux/kfence.h>
>  #include <linux/libfdt.h>
>  #include <linux/mman.h>
>  #include <linux/nodemask.h>
> @@ -515,10 +516,16 @@ static void __init map_mem(pgd_t *pgdp)
>          */
>         BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
>
> -       if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
> -           IS_ENABLED(CONFIG_KFENCE))
> +       if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
>                 flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>
> +       /*
> +        * KFENCE requires linear map to be mapped at page granularity, so
> +        * temporarily skip mapping for __kfence_pool in the following
> +        * for-loop
> +        */
> +       memblock_mark_nomap(__pa(__kfence_pool), KFENCE_POOL_SIZE);
> +

Did you build this with CONFIG_KFENCE unset? I don't think it builds.

>         /*
>          * Take care not to create a writable alias for the
>          * read-only text and rodata sections of the kernel image.
> @@ -553,6 +560,15 @@ static void __init map_mem(pgd_t *pgdp)
>         __map_memblock(pgdp, kernel_start, kernel_end,
>                        PAGE_KERNEL, NO_CONT_MAPPINGS);
>         memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
> +
> +       /*
> +        * Map the __kfence_pool at page granularity now.
> +        */
> +       __map_memblock(pgdp, __pa(__kfence_pool),
> +                      __pa(__kfence_pool + KFENCE_POOL_SIZE),
> +                      pgprot_tagged(PAGE_KERNEL),
> +                      NO_EXEC_MAPPINGS | NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
> +       memblock_clear_nomap(__pa(__kfence_pool), KFENCE_POOL_SIZE);
>  }
>
>  void mark_rodata_ro(void)
> @@ -1480,12 +1496,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
>
>         VM_BUG_ON(!mhp_range_allowed(start, size, true));
>
> -       /*
> -        * KFENCE requires linear map to be mapped at page granularity, so that
> -        * it is possible to protect/unprotect single pages in the KFENCE pool.
> -        */
> -       if (rodata_full || debug_pagealloc_enabled() ||
> -           IS_ENABLED(CONFIG_KFENCE))
> +       if (rodata_full || debug_pagealloc_enabled())
>                 flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>
>         __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
> --
> 2.31.0
>
> --
> You received this message because you are subscribed to the Google Groups "kasan-dev" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to kasan-dev+unsubscribe@...glegroups.com.
> To view this discussion on the web visit https://groups.google.com/d/msgid/kasan-dev/20210524172606.08dac28d%40xhacker.debian.

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ