Message-ID: <72bc516f-0ee7-fbf1-7814-8501335b4246@loongson.cn>
Date:   Thu, 30 Mar 2023 16:53:03 +0800
From:   Qing Zhang <zhangqing@...ngson.cn>
To:     Youling Tang <tangyouling@...ngson.cn>
Cc:     Andrey Ryabinin <ryabinin.a.a@...il.com>,
        Jonathan Corbet <corbet@....net>,
        Huacai Chen <chenhuacai@...nel.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Alexander Potapenko <glider@...gle.com>,
        Andrey Konovalov <andreyknvl@...il.com>,
        Dmitry Vyukov <dvyukov@...gle.com>,
        Vincenzo Frascino <vincenzo.frascino@....com>,
        WANG Xuerui <kernel@...0n.name>,
        Jiaxun Yang <jiaxun.yang@...goat.com>,
        kasan-dev@...glegroups.com, linux-doc@...r.kernel.org,
        linux-mm@...ck.org, loongarch@...ts.linux.dev,
        linux-kernel@...r.kernel.org, linux-hardening@...r.kernel.org
Subject: Re: [PATCH] LoongArch: Add kernel address sanitizer support

Hi, Youling

On 2023/3/30 2:50 PM, Youling Tang wrote:
> Hi, Qing
> 
> On 03/28/2023 07:17 PM, Qing Zhang wrote:
>> 1/8 of kernel addresses is reserved for shadow memory. But for LoongArch,
>> there are a lot of holes between different segments, and the valid address
>> space (256T available) is insufficient to map all these segments to KASAN
>> shadow memory with the common formula provided by the KASAN core, namely:
>> (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
>>
>> So LoongArch uses an arch-specific mapping formula: different segments
>> are mapped individually, and only a limited length of space of each
>> specific segment is mapped to shadow.
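
In other words, the per-segment formula condenses to the switch below (a
sketch restating kasan_mem_to_shadow() from this patch, nothing new;
shadow_of() is just an illustrative name, and the XK*RANGE_* macros are
all defined in asm/kasan.h further down):

  static inline void *shadow_of(unsigned long addr)
  {
      /* Top 16 bits select the segment: 0x9000 cached, 0x8000 uncached,
       * 0xffff vmalloc; everything else is an unmapped hole. */
      unsigned long seg = (addr >> XRANGE_SHIFT) & 0xffff;
      /* Keep only the bits that are valid within the segment. */
      unsigned long off = addr & XRANGE_SHADOW_MASK;

      switch (seg) {
      case XKPRANGE_CC_SEG:
          return (void *)((off >> KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_SHADOW_OFFSET);
      case XKPRANGE_UC_SEG:
          return (void *)((off >> KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_SHADOW_OFFSET);
      case XKVRANGE_VC_SEG:
          return (void *)((off >> KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_SHADOW_OFFSET);
      default:
          return NULL; /* hole: no shadow mapping */
      }
  }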
>>
>> At the early boot stage, the whole shadow region is populated with just
>> one physical page (kasan_early_shadow_page). Later, this page is
>> reused as read-only zero shadow for memory that KASAN does not
>> currently track.
>> After the physical memory has been mapped, pages for shadow memory are
>> allocated and mapped.
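
Conceptually, the early mapping amounts to the following (a sketch only:
pte_of() is a placeholder, not a real kernel helper, and the actual
kasan_early_init() shares the upper page-table levels instead of looping
over individual PTEs):

  unsigned long addr;

  /* Every shadow page initially maps the same physical zero page,
   * read-only, so any shadow load returns 0 ("not poisoned") until
   * real shadow pages are allocated later. */
  for (addr = KASAN_SHADOW_START; addr < KASAN_SHADOW_END; addr += PAGE_SIZE)
      set_pte(pte_of(addr),
              pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO));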
>>
>> Functions like memset()/memmove()/memcpy() do a lot of memory accesses.
>> If a bad pointer is passed to one of these functions, it is important
>> to catch this. The compiler's instrumentation cannot do this since
>> these functions are written in assembly.
>> KASAN replaces these memory functions with manually instrumented variants.
>> The original functions are declared as weak symbols so that the strong
>> definitions in mm/kasan/shadow.c can replace them. The original functions
>> also have aliases with a '__' prefix in the name, so we can call the
>> non-instrumented variants if needed.
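
The pattern being described is, in its C equivalent (a sketch; the patch
actually does this in assembly, in arch/loongarch/lib/memset.S and friends):

  /* The architecture exports the real implementation under two names. */
  void *__memset(void *s, int c, size_t n);

  /* The plain name is only a weak alias, so KASAN's instrumented strong
   * definition wins at link time when KASAN is enabled. */
  void *memset(void *s, int c, size_t n)
      __attribute__((weak, alias("__memset")));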
>>
>> Signed-off-by: Qing Zhang <zhangqing@...ngson.cn>
>> ---
>>  Documentation/dev-tools/kasan.rst             |   4 +-
>>  .../features/debug/KASAN/arch-support.txt     |   2 +-
>>  arch/loongarch/Kconfig                        |   7 +
>>  arch/loongarch/include/asm/kasan.h            | 120 +++++++++
>>  arch/loongarch/include/asm/pgtable.h          |   7 +
>>  arch/loongarch/include/asm/setup.h            |   2 +-
>>  arch/loongarch/include/asm/string.h           |  20 ++
>>  arch/loongarch/kernel/Makefile                |   3 +
>>  arch/loongarch/kernel/head.S                  |  14 +-
>>  arch/loongarch/kernel/relocate.c              |   8 +-
>>  arch/loongarch/kernel/setup.c                 |   4 +
>>  arch/loongarch/lib/memcpy.S                   |   4 +-
>>  arch/loongarch/lib/memmove.S                  |  13 +-
>>  arch/loongarch/lib/memset.S                   |   4 +-
>>  arch/loongarch/mm/Makefile                    |   2 +
>>  arch/loongarch/mm/kasan_init.c                | 255 ++++++++++++++++++
>>  arch/loongarch/vdso/Makefile                  |   4 +
>>  include/linux/kasan.h                         |   2 +
>>  mm/kasan/generic.c                            |   5 +
>>  mm/kasan/init.c                               |  10 +-
>>  mm/kasan/kasan.h                              |   6 +
>>  21 files changed, 470 insertions(+), 26 deletions(-)
>>  create mode 100644 arch/loongarch/include/asm/kasan.h
>>  create mode 100644 arch/loongarch/mm/kasan_init.c
>>
>> diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
>> index e66916a483cd..ee91f2872767 100644
>> --- a/Documentation/dev-tools/kasan.rst
>> +++ b/Documentation/dev-tools/kasan.rst
>> @@ -41,8 +41,8 @@ Support
>>  Architectures
>>  ~~~~~~~~~~~~~
>>
>> -Generic KASAN is supported on x86_64, arm, arm64, powerpc, riscv, s390, and
>> -xtensa, and the tag-based KASAN modes are supported only on arm64.
>> +Generic KASAN is supported on x86_64, arm, arm64, powerpc, riscv, s390, xtensa,
>> +and loongarch, and the tag-based KASAN modes are supported only on arm64.
>>
>>  Compilers
>>  ~~~~~~~~~
>> diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
>> index bf0124fae643..c4581c2edb28 100644
>> --- a/Documentation/features/debug/KASAN/arch-support.txt
>> +++ b/Documentation/features/debug/KASAN/arch-support.txt
>> @@ -13,7 +13,7 @@
>>      |        csky: | TODO |
>>      |     hexagon: | TODO |
>>      |        ia64: | TODO |
>> -    |   loongarch: | TODO |
>> +    |   loongarch: |  ok  |
>>      |        m68k: | TODO |
>>      |  microblaze: | TODO |
>>      |        mips: | TODO |
>> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
>> index 72dd00f48b8c..61f883c51045 100644
>> --- a/arch/loongarch/Kconfig
>> +++ b/arch/loongarch/Kconfig
>> @@ -7,6 +7,7 @@ config LOONGARCH
>>      select ACPI_MCFG if ACPI
>>      select ACPI_SYSTEM_POWER_STATES_SUPPORT    if ACPI
>>      select ARCH_BINFMT_ELF_STATE
>> +    select ARCH_DISABLE_KASAN_INLINE
>>      select ARCH_ENABLE_MEMORY_HOTPLUG
>>      select ARCH_ENABLE_MEMORY_HOTREMOVE
>>      select ARCH_HAS_ACPI_TABLE_UPGRADE    if ACPI
>> @@ -83,6 +84,7 @@ config LOONGARCH
>>      select HAVE_ARCH_AUDITSYSCALL
>>      select HAVE_ARCH_MMAP_RND_BITS if MMU
>>      select HAVE_ARCH_SECCOMP_FILTER
>> +    select HAVE_ARCH_KASAN if 64BIT
>>      select HAVE_ARCH_TRACEHOOK
>>      select HAVE_ARCH_TRANSPARENT_HUGEPAGE
>>      select HAVE_ASM_MODVERSIONS
>> @@ -626,6 +628,11 @@ config ARCH_MMAP_RND_BITS_MIN
>>  config ARCH_MMAP_RND_BITS_MAX
>>      default 18
>>
>> +config KASAN_SHADOW_OFFSET
>> +    hex
>> +    default 0x0
>> +    depends on KASAN
>> +
>>  menu "Power management options"
>>
>>  config ARCH_SUSPEND_POSSIBLE
>> diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h
>> new file mode 100644
>> index 000000000000..582bcded311e
>> --- /dev/null
>> +++ b/arch/loongarch/include/asm/kasan.h
>> @@ -0,0 +1,120 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +#ifndef __ASM_KASAN_H
>> +#define __ASM_KASAN_H
>> +
>> +#ifndef __ASSEMBLY__
>> +
>> +#include <linux/linkage.h>
>> +#include <linux/mmzone.h>
>> +#include <asm/addrspace.h>
>> +#include <asm/io.h>
>> +#include <asm/pgtable.h>
>> +
>> +#define __HAVE_ARCH_SHADOW_MAP
>> +
>> +#define KASAN_SHADOW_SCALE_SHIFT 3
>> +#define KASAN_SHADOW_OFFSET    _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
>> +
>> +#define XRANGE_SHIFT (48)
>> +
>> +/* Valid address length */
>> +#define XRANGE_SHADOW_SHIFT    (PGDIR_SHIFT + PAGE_SHIFT - 3)
>> +/* Used for taking out the valid address */
>> +#define XRANGE_SHADOW_MASK    GENMASK_ULL(XRANGE_SHADOW_SHIFT - 1, 0)
>> +/* One segment whole address space size */
>> +#define    XRANGE_SIZE        (XRANGE_SHADOW_MASK + 1)
>> +
>> +/* 64-bit segment value. */
>> +#define XKPRANGE_UC_SEG        (0x8000)
>> +#define XKPRANGE_CC_SEG        (0x9000)
>> +#define XKVRANGE_VC_SEG        (0xffff)
>> +
>> +/* Cached */
>> +#define XKPRANGE_CC_START        CACHE_BASE
>> +#define XKPRANGE_CC_SIZE        XRANGE_SIZE
>> +#define XKPRANGE_CC_KASAN_OFFSET    (0)
>> +#define XKPRANGE_CC_SHADOW_SIZE        (XKPRANGE_CC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
>> +#define XKPRANGE_CC_SHADOW_END        (XKPRANGE_CC_KASAN_OFFSET + XKPRANGE_CC_SHADOW_SIZE)
>> +
>> +/* UnCached */
>> +#define XKPRANGE_UC_START        UNCACHE_BASE
>> +#define XKPRANGE_UC_SIZE        XRANGE_SIZE
>> +#define XKPRANGE_UC_KASAN_OFFSET    XKPRANGE_CC_SHADOW_END
>> +#define XKPRANGE_UC_SHADOW_SIZE        (XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
>> +#define XKPRANGE_UC_SHADOW_END        (XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE)
>> +
>> +/* VMALLOC (Cached or UnCached)  */
>> +#define XKVRANGE_VC_START        MODULES_VADDR
>> +#define XKVRANGE_VC_SIZE        round_up(VMEMMAP_END - MODULES_VADDR + 1, PGDIR_SIZE)
>> +#define XKVRANGE_VC_KASAN_OFFSET    XKPRANGE_UC_SHADOW_END
>> +#define XKVRANGE_VC_SHADOW_SIZE        (XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
>> +#define XKVRANGE_VC_SHADOW_END        (XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE)
>> +
>> +/* Kasan shadow memory start right after vmalloc. */
>> +#define KASAN_SHADOW_START        round_up(VMEMMAP_END, PGDIR_SIZE)
>> +#define KASAN_SHADOW_SIZE        (XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
>> +#define KASAN_SHADOW_END        round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE)
>> +
>> +#define XKPRANGE_CC_SHADOW_OFFSET    (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
>> +#define XKPRANGE_UC_SHADOW_OFFSET    (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
>> +#define XKVRANGE_VC_SHADOW_OFFSET    (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)
>> +
>> +extern bool kasan_early_stage;
>> +extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
>> +
>> +static inline void *kasan_mem_to_shadow(const void *addr)
>> +{
>> +    if (kasan_early_stage) {
>> +        return (void *)(kasan_early_shadow_page);
>> +    } else {
>> +        unsigned long maddr = (unsigned long)addr;
>> +        unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
>> +        unsigned long offset = 0;
>> +
>> +        maddr &= XRANGE_SHADOW_MASK;
>> +        switch (xrange) {
>> +        case XKPRANGE_CC_SEG:
>> +            offset = XKPRANGE_CC_SHADOW_OFFSET;
>> +            break;
>> +        case XKPRANGE_UC_SEG:
>> +            offset = XKPRANGE_UC_SHADOW_OFFSET;
>> +            break;
>> +        case XKVRANGE_VC_SEG:
>> +            offset = XKVRANGE_VC_SHADOW_OFFSET;
>> +            break;
>> +        default:
>> +            WARN_ON(1);
>> +            return NULL;
>> +        }
>> +
>> +        return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
>> +    }
>> +}
>> +
>> +static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
>> +{
>> +    unsigned long addr = (unsigned long)shadow_addr;
>> +
>> +    if (unlikely(addr > KASAN_SHADOW_END) ||
>> +        unlikely(addr < KASAN_SHADOW_START)) {
>> +        WARN_ON(1);
>> +        return NULL;
>> +    }
>> +
>> +    if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
>> +        return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
>> +    else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
>> +        return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
>> +    else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
>> +        return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
>> +    else {
>> +        WARN_ON(1);
>> +        return NULL;
>> +    }
>> +}
>> +
>> +void kasan_init(void);
>> +asmlinkage void kasan_early_init(void);
>> +
>> +#endif
>> +#endif
>> diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
>> index d28fb9dbec59..5cfdf79b287e 100644
>> --- a/arch/loongarch/include/asm/pgtable.h
>> +++ b/arch/loongarch/include/asm/pgtable.h
>> @@ -86,9 +86,16 @@ extern unsigned long zero_page_mask;
>>  #define MODULES_END    (MODULES_VADDR + SZ_256M)
>>
>>  #define VMALLOC_START    MODULES_END
>> +
>> +#ifndef CONFIG_KASAN
>>  #define VMALLOC_END    \
>>      (vm_map_base +    \
>>       min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE)
>> +#else
>> +#define VMALLOC_END    \
>> +    (vm_map_base +    \
>> +     min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE)
>> +#endif
>>
>>  #define vmemmap        ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
>>  #define VMEMMAP_END    ((unsigned long)vmemmap + VMEMMAP_SIZE - 1)
>> diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
>> index be05c0e706a2..2dca0d1dd90a 100644
>> --- a/arch/loongarch/include/asm/setup.h
>> +++ b/arch/loongarch/include/asm/setup.h
>> @@ -33,7 +33,7 @@ extern long __la_abs_end;
>>  extern long __rela_dyn_begin;
>>  extern long __rela_dyn_end;
>>
>> -extern void * __init relocate_kernel(void);
>> +extern unsigned long __init relocate_kernel(void);
>>
>>  #endif
>>
>> diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h
>> index 7b29cc9c70aa..5bb5a90d2681 100644
>> --- a/arch/loongarch/include/asm/string.h
>> +++ b/arch/loongarch/include/asm/string.h
>> @@ -7,11 +7,31 @@
>>
>>  #define __HAVE_ARCH_MEMSET
>>  extern void *memset(void *__s, int __c, size_t __count);
>> +extern void *__memset(void *__s, int __c, size_t __count);
>>
>>  #define __HAVE_ARCH_MEMCPY
>>  extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
>> +extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
>>
>>  #define __HAVE_ARCH_MEMMOVE
>>  extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
>> +extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
>> +
>> +#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
>> +
>> +/*
>> + * For files that are not instrumented (e.g. mm/slub.c) we
>> + * should use not instrumented version of mem* functions.
>> + */
>> +
>> +#define memset(s, c, n) __memset(s, c, n)
>> +#define memcpy(dst, src, len) __memcpy(dst, src, len)
>> +#define memmove(dst, src, len) __memmove(dst, src, len)
>> +
>> +#ifndef __NO_FORTIFY
>> +#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
> 
> If we need the __NO_FORTIFY handling, please add ARCH_HAS_FORTIFY_SOURCE
> to Kconfig and submit it as a separate patch, like commit 9530141455c9
> ("riscv: Add ARCH_HAS_FORTIFY_SOURCE").

Ok, I will add a separate patch in v2.
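
Something along these lines, I suppose (a sketch, not the actual v2
change; the exact position in the select list may differ):

  --- a/arch/loongarch/Kconfig
  +++ b/arch/loongarch/Kconfig
  @@ ... @@ config LOONGARCH
       select ARCH_HAS_ACPI_TABLE_UPGRADE    if ACPI
  +    select ARCH_HAS_FORTIFY_SOURCE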

Thanks,
-Qing
> 
> Youling.
