Message-ID: <227e2c6e-f479-849d-8942-1d5ff4ccd440@arm.com>
Date:   Thu, 12 Oct 2017 08:58:53 +0100
From:   Marc Zyngier <marc.zyngier@....com>
To:     Abbott Liu <liuwenliang@...wei.com>, linux@...linux.org.uk,
        aryabinin@...tuozzo.com, afzal.mohd.ma@...il.com,
        f.fainelli@...il.com, labbott@...hat.com,
        kirill.shutemov@...ux.intel.com, mhocko@...e.com, cdall@...aro.org,
        catalin.marinas@....com, akpm@...ux-foundation.org,
        mawilcox@...rosoft.com, tglx@...utronix.de, thgarnie@...gle.com,
        keescook@...omium.org, arnd@...db.de, vladimir.murzin@....com,
        tixy@...aro.org, ard.biesheuvel@...aro.org, robin.murphy@....com,
        mingo@...nel.org, grygorii.strashko@...aro.org
Cc:     glider@...gle.com, dvyukov@...gle.com, opendmb@...il.com,
        linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
        kasan-dev@...glegroups.com, linux-mm@...ck.org,
        jiazhenghua@...wei.com, dylix.dailei@...wei.com,
        zengweilin@...wei.com, heshaoliang@...wei.com
Subject: Re: [PATCH 01/11] Initialize the mapping of KASan shadow memory

On 11/10/17 09:22, Abbott Liu wrote:
> From: Andrey Ryabinin <a.ryabinin@...sung.com>
> 
> This patch initializes the KASan shadow region's page tables and memory.
> KASan initialization happens in two stages:
> 1. At early boot, the whole shadow region is mapped to a single
>    physical page (kasan_zero_page). This is done by the function
>    kasan_early_init, which is called from __mmap_switched (arch/arm/kernel/
>    head-common.S).
> 
> 2. After paging_init has been called, kasan_zero_page is used as the zero
>    shadow for memory that KASan does not need to track, and new shadow
>    memory is allocated for the memory that KASan does need to track. This
>    is done by the function kasan_init, which is called from setup_arch.
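
(Context for the two stages described above: both ultimately populate
page tables for shadow addresses computed by the generic
kasan_mem_to_shadow() helper in include/linux/kasan.h, which looks
roughly like the sketch below; the arch-specific constants would
presumably come from the kasan_def.h header this series adds.)

	/* Generic KASan address-to-shadow translation: one shadow byte
	 * covers 2^KASAN_SHADOW_SCALE_SHIFT (= 8) bytes of address space,
	 * offset by the arch-chosen KASAN_SHADOW_OFFSET constant.
	 */
	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}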
> 
> Cc: Andrey Ryabinin <a.ryabinin@...sung.com>
> Signed-off-by: Abbott Liu <liuwenliang@...wei.com>
> ---
>  arch/arm/include/asm/kasan.h       |  20 +++
>  arch/arm/include/asm/pgalloc.h     |   5 +-
>  arch/arm/include/asm/pgtable.h     |   1 +
>  arch/arm/include/asm/proc-fns.h    |  33 +++++
>  arch/arm/include/asm/thread_info.h |   4 +
>  arch/arm/kernel/head-common.S      |   4 +
>  arch/arm/kernel/setup.c            |   2 +
>  arch/arm/mm/Makefile               |   5 +
>  arch/arm/mm/kasan_init.c           | 257 +++++++++++++++++++++++++++++++++++++
>  mm/kasan/kasan.c                   |   2 +-
>  10 files changed, 331 insertions(+), 2 deletions(-)
>  create mode 100644 arch/arm/include/asm/kasan.h
>  create mode 100644 arch/arm/mm/kasan_init.c
> 
> diff --git a/arch/arm/include/asm/kasan.h b/arch/arm/include/asm/kasan.h
> new file mode 100644
> index 0000000..90ee60c
> --- /dev/null
> +++ b/arch/arm/include/asm/kasan.h
> @@ -0,0 +1,20 @@
> +#ifndef __ASM_KASAN_H
> +#define __ASM_KASAN_H
> +
> +#ifdef CONFIG_KASAN
> +
> +#include <asm/kasan_def.h>
> +/*
> + * Compiler uses shadow offset assuming that addresses start
> + * from 0. Kernel addresses don't start from 0, so shadow
> + * for kernel really starts from 'compiler's shadow offset' +
> + * ('kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT)
> + */
> +
> +extern void kasan_init(void);
> +
> +#else
> +static inline void kasan_init(void) { }
> +#endif
> +
> +#endif
> diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
> index b2902a5..10cee6a 100644
> --- a/arch/arm/include/asm/pgalloc.h
> +++ b/arch/arm/include/asm/pgalloc.h
> @@ -50,8 +50,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
>   */
>  #define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })
>  #define pmd_free(mm, pmd)		do { } while (0)
> +#ifndef CONFIG_KASAN
>  #define pud_populate(mm,pmd,pte)	BUG()
> -
> +#else
> +#define pud_populate(mm,pmd,pte)	do { } while (0)
> +#endif
>  #endif	/* CONFIG_ARM_LPAE */
>  
>  extern pgd_t *pgd_alloc(struct mm_struct *mm);
> diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
> index 1c46238..fdf343f 100644
> --- a/arch/arm/include/asm/pgtable.h
> +++ b/arch/arm/include/asm/pgtable.h
> @@ -97,6 +97,7 @@ extern pgprot_t		pgprot_s2_device;
>  #define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
>  #define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
>  #define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
> +#define PAGE_KERNEL_RO		_MOD_PROT(pgprot_kernel, L_PTE_XN | L_PTE_RDONLY)
>  #define PAGE_KERNEL_EXEC	pgprot_kernel
>  #define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
>  #define PAGE_HYP_EXEC		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
> diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
> index f2e1af4..6e26714 100644
> --- a/arch/arm/include/asm/proc-fns.h
> +++ b/arch/arm/include/asm/proc-fns.h
> @@ -131,6 +131,15 @@ extern void cpu_resume(void);
>  		pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1);	\
>  		(pgd_t *)phys_to_virt(pg);		\
>  	})
> +
> +#define cpu_set_ttbr0(val)					\
> +	do {							\
> +		u64 ttbr = val;					\
> +		__asm__("mcrr	p15, 0, %Q0, %R0, c2"		\
> +			: : "r" (ttbr));	\
> +	} while (0)
> +
> +
>  #else
>  #define cpu_get_pgd()	\
>  	({						\
> @@ -140,6 +149,30 @@ extern void cpu_resume(void);
>  		pg &= ~0x3fff;				\
>  		(pgd_t *)phys_to_virt(pg);		\
>  	})
> +
> +#define cpu_set_ttbr(nr, val)					\
> +	do {							\
> +		u64 ttbr = val;					\
> +		__asm__("mcr	p15, 0, %0, c2, c0, 0"		\
> +			: : "r" (ttbr));			\
> +	} while (0)
> +
> +#define cpu_get_ttbr(nr)					\
> +	({							\
> +		unsigned long ttbr;				\
> +		__asm__("mrc	p15, 0, %0, c2, c0, 0"		\
> +			: "=r" (ttbr));				\
> +		ttbr;						\
> +	})
> +
> +#define cpu_set_ttbr0(val)					\
> +	do {							\
> +		u64 ttbr = val;					\
> +		__asm__("mcr	p15, 0, %0, c2, c0, 0"		\
> +			: : "r" (ttbr));			\
> +	} while (0)
> +
> +

You could instead lift and extend the definitions provided in kvm_hyp.h,
and use the read_sysreg/write_sysreg helpers defined in cp15.h.
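
For example (an untested sketch only; where the TTBR defines should
live, and whether they can simply be lifted out of kvm_hyp.h without
clashing, is an open question):

	#include <asm/cp15.h>

	/* Mirrors the kvm_hyp.h-style definitions, built on the
	 * __ACCESS_CP15* helpers from cp15.h.
	 */
	#ifdef CONFIG_ARM_LPAE
	#define TTBR0		__ACCESS_CP15_64(0, c2)		/* 64-bit mcrr/mrrc */
	#else
	#define TTBR0		__ACCESS_CP15(c2, 0, c0, 0)	/* 32-bit mcr/mrc */
	#endif

	#define cpu_get_ttbr0()		read_sysreg(TTBR0)
	#define cpu_set_ttbr0(val)	write_sysreg(val, TTBR0)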

Thanks,

	M.
-- 
Jazz is not dead. It just smells funny...
