Date:   Tue, 24 Mar 2020 14:45:14 +0000
From:   Marc Zyngier <maz@...nel.org>
To:     Zhenyu Ye <yezhenyu2@...wei.com>
Cc:     <will@...nel.org>, <mark.rutland@....com>,
        <catalin.marinas@....com>, <aneesh.kumar@...ux.ibm.com>,
        <akpm@...ux-foundation.org>, <npiggin@...il.com>,
        <peterz@...radead.org>, <arnd@...db.de>, <rostedt@...dmis.org>,
        <suzuki.poulose@....com>, <tglx@...utronix.de>,
        <yuzhao@...gle.com>, <Dave.Martin@....com>, <steven.price@....com>,
        <broonie@...nel.org>, <guohanjun@...wei.com>,
        <linux-arm-kernel@...ts.infradead.org>,
        <linux-kernel@...r.kernel.org>, <linux-arch@...r.kernel.org>,
        <linux-mm@...ck.org>, <arm@...nel.org>, <xiexiangyou@...wei.com>,
        <prime.zeng@...ilicon.com>, <zhangshaokun@...ilicon.com>
Subject: Re: [RFC PATCH v4 5/6] arm64: tlb: Use translation level hint in
 vm_flags

On Tue, 24 Mar 2020 21:45:33 +0800
Zhenyu Ye <yezhenyu2@...wei.com> wrote:

> This patch uses the VM_LEVEL flags in vma->vm_flags to set the
> TTL field in the TLBI instructions.
> 
> Signed-off-by: Zhenyu Ye <yezhenyu2@...wei.com>
> ---
>  arch/arm64/include/asm/mmu.h      |  2 ++
>  arch/arm64/include/asm/tlbflush.h | 14 ++++++++------
>  arch/arm64/mm/mmu.c               | 14 ++++++++++++++
>  3 files changed, 24 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
> index d79ce6df9e12..a8b8824a7405 100644
> --- a/arch/arm64/include/asm/mmu.h
> +++ b/arch/arm64/include/asm/mmu.h
> @@ -86,6 +86,8 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
>  extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
>  extern void mark_linear_text_alias_ro(void);
>  extern bool kaslr_requires_kpti(void);
> +extern unsigned int get_vma_level(struct vm_area_struct *vma);
> +
>  
>  #define INIT_MM_CONTEXT(name)	\
>  	.pgd = init_pg_dir,
> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> index d141c080e494..93bb09fdfafd 100644
> --- a/arch/arm64/include/asm/tlbflush.h
> +++ b/arch/arm64/include/asm/tlbflush.h
> @@ -218,10 +218,11 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
>  					 unsigned long uaddr)
>  {
>  	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
> +	unsigned int level = get_vma_level(vma);
>  
>  	dsb(ishst);
> -	__tlbi_level(vale1is, addr, 0);
> -	__tlbi_user_level(vale1is, addr, 0);
> +	__tlbi_level(vale1is, addr, level);
> +	__tlbi_user_level(vale1is, addr, level);
>  }
>  
>  static inline void flush_tlb_page(struct vm_area_struct *vma,
> @@ -242,6 +243,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
>  				     unsigned long stride, bool last_level)
>  {
>  	unsigned long asid = ASID(vma->vm_mm);
> +	unsigned int level = get_vma_level(vma);
>  	unsigned long addr;
>  
>  	start = round_down(start, stride);
> @@ -261,11 +263,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
>  	dsb(ishst);
>  	for (addr = start; addr < end; addr += stride) {
>  		if (last_level) {
> -			__tlbi_level(vale1is, addr, 0);
> -			__tlbi_user_level(vale1is, addr, 0);
> +			__tlbi_level(vale1is, addr, level);
> +			__tlbi_user_level(vale1is, addr, level);
>  		} else {
> -			__tlbi_level(vae1is, addr, 0);
> -			__tlbi_user_level(vae1is, addr, 0);
> +			__tlbi_level(vae1is, addr, level);
> +			__tlbi_user_level(vae1is, addr, level);
>  		}
>  	}
>  	dsb(ish);
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 128f70852bf3..e6a1221cd86b 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -60,6 +60,20 @@ static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
>  
>  static DEFINE_SPINLOCK(swapper_pgdir_lock);
>  
> +inline unsigned int get_vma_level(struct vm_area_struct *vma)
> +{
> +	unsigned int level = 0;
> +	if (vma->vm_flags & VM_LEVEL_PUD)
> +		level = 1;
> +	else if (vma->vm_flags & VM_LEVEL_PMD)
> +		level = 2;
> +	else if (vma->vm_flags & VM_LEVEL_PTE)
> +		level = 3;
> +
> +	vma->vm_flags &= ~(VM_LEVEL_PUD | VM_LEVEL_PMD | VM_LEVEL_PTE);
> +	return level;
> +}
> +
>  void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
>  {
>  	pgd_t *fixmap_pgdp;


It feels bizarre that a TLBI is now a destructive operation: you've
lost the flags by clearing them. Even if that isn't really a problem
in practice (you issue the TLBI because you've unmapped the VMA), it
remains that the act of invalidating TLBs isn't expected to modify a
kernel structure (and I'm not even thinking about potential races here).
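
For illustration only, a read-only helper along these lines (a rough,
untested sketch that keeps the VM_LEVEL_* flags from your earlier patch
but never writes to vm_flags) would at least keep the invalidation path
free of side effects:

static inline unsigned int get_vma_level(struct vm_area_struct *vma)
{
	/* Derive the TTL hint without modifying vm_flags. */
	if (vma->vm_flags & VM_LEVEL_PUD)
		return 1;
	if (vma->vm_flags & VM_LEVEL_PMD)
		return 2;
	if (vma->vm_flags & VM_LEVEL_PTE)
		return 3;

	return 0;	/* no hint: invalidate at any level */
}

That still leaves open how the flags are kept in sync when a mapping is
split or collapsed, which is another reason to derive the level from
somewhere else.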

If anything, I feel this should be based around the mmu_gather
structure, which already tracks the right level of information and
additionally knows about the P4D level, which is missing from your
patches (even if arm64 is so far limited to 4 levels).
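
Something like the below is the kind of thing I have in mind -- a rough,
untested sketch, relying on the cleared_{ptes,pmds,puds,p4ds} bits that
mmu_gather already maintains:

static inline unsigned int tlb_get_level(struct mmu_gather *tlb)
{
	/*
	 * Only provide a TTL hint if exactly one level was cleared;
	 * returning 0 means "no hint" and keeps today's behaviour.
	 */
	if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
				   tlb->cleared_puds ||
				   tlb->cleared_p4ds))
		return 3;

	if (tlb->cleared_pmds && !(tlb->cleared_ptes ||
				   tlb->cleared_puds ||
				   tlb->cleared_p4ds))
		return 2;

	if (tlb->cleared_puds && !(tlb->cleared_ptes ||
				   tlb->cleared_pmds ||
				   tlb->cleared_p4ds))
		return 1;

	return 0;
}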

Thanks,

	M.
-- 
Jazz is not dead. It just smells funny...
