Message-ID: <7e17ceaf-6648-e099-d526-8b698eaa5a04@microchip.com>
Date:   Sun, 4 Sep 2022 13:49:31 +0000
From:   <Conor.Dooley@...rochip.com>
To:     <tjytimi@....com>, <anup@...infault.org>,
        <paul.walmsley@...ive.com>, <palmer@...belt.com>,
        <aou@...s.berkeley.edu>, <alexandre.ghiti@...onical.com>,
        <guoren@...nel.org>, <heiko@...ech.de>,
        <akpm@...ux-foundation.org>, <panqinglin2020@...as.ac.cn>,
        <tongtiangen@...wei.com>, <sunnanyong@...wei.com>,
        <anshuman.khandual@....com>, <atishp@...osinc.com>
CC:     <linux-riscv@...ts.infradead.org>, <linux-kernel@...r.kernel.org>,
        <falcon@...ylab.org>
Subject: Re: [PATCH v2] riscv: make update_mmu_cache to support asid

On 04/09/2022 14:37, Jinyu Tang wrote:
> The `update_mmu_cache` function in riscv flush tlb cache without asid

FWIW, when referring to functions please put the () at the end.
Makes the changelog more natural to read. You do not need to make
a v3 for that though.

Thanks,
Conor.

> information now, which also flushes TLB entries for other tasks'
> address spaces even if the processor supports asid. So add a new
> function `flush_tlb_local_one_page` to flush a single page on the
> local CPU whether the processor supports asid or not, for cases like
> `update_mmu_cache` that only need to flush one local page.
> 
> Signed-off-by: Jinyu Tang <tjytimi@....com>
> ---
> RFC V1 -> V2:
> 1. Rebased on PATCH9 of the IPI improvement series, as Anup Patel
> suggested.
> 2. Made the commit log clearer.
> 
>  arch/riscv/include/asm/pgtable.h  |  2 +-
>  arch/riscv/include/asm/tlbflush.h |  2 ++
>  arch/riscv/mm/tlbflush.c          | 11 +++++++++++
>  3 files changed, 14 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> index 7ec936910a96..09ccefa6b6c7 100644
> --- a/arch/riscv/include/asm/pgtable.h
> +++ b/arch/riscv/include/asm/pgtable.h
> @@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
>  	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
>  	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
>  	 */
> -	local_flush_tlb_page(address);
> +	flush_tlb_local_one_page(vma, address);
>  }
>  
>  static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> index 801019381dea..120aeb1c6ecf 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -30,6 +30,7 @@ static inline void local_flush_tlb_page(unsigned long addr)
>  #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
>  void flush_tlb_all(void);
>  void flush_tlb_mm(struct mm_struct *mm);
> +void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr);
>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
>  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
>  		     unsigned long end);
> @@ -42,6 +43,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
>  
>  #define flush_tlb_all() local_flush_tlb_all()
>  #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
> +#define flush_tlb_local_one_page(vma, addr) local_flush_tlb_page(addr)
>  
>  static inline void flush_tlb_range(struct vm_area_struct *vma,
>  		unsigned long start, unsigned long end)
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index 27a7db8eb2c4..0843e1baaf34 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -41,6 +41,17 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
>  		local_flush_tlb_all_asid(asid);
>  }
>  
> +void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr)
> +{
> +	if (static_branch_unlikely(&use_asid_allocator)) {
> +		unsigned long asid = atomic_long_read(&vma->vm_mm->context.id);
> +
> +		local_flush_tlb_page_asid(addr, asid);
> +	} else {
> +		local_flush_tlb_page(addr);
> +	}
> +}
> +
>  static void __ipi_flush_tlb_all(void *info)
>  {
>  	local_flush_tlb_all();

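[Editorial note, not part of the thread: the gain here comes from SFENCE.VMA's
optional ASID operand. A flush that omits the ASID invalidates the translation
for that address in every address space, while passing an ASID in rs2 limits
the flush to one address space, which is what local_flush_tlb_page_asid()
provides on the asid-allocator path above. A minimal illustrative sketch with
hypothetical demo_* helper names, not the kernel's exact implementation:

    /* Illustration only -- hypothetical helpers, not kernel code. */

    /* Flush the mapping for 'addr' in *all* address spaces (no ASID). */
    static inline void demo_local_flush_page(unsigned long addr)
    {
    	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
    }

    /* Flush the mapping for 'addr' only in the address space tagged 'asid'. */
    static inline void demo_local_flush_page_asid(unsigned long addr,
    					      unsigned long asid)
    {
    	__asm__ __volatile__ ("sfence.vma %0, %1"
    			      : : "r" (addr), "r" (asid) : "memory");
    }

With no ASID operand the hardware must drop the page's translation for every
address space, so the patch's ASID-aware variant avoids needlessly evicting
other tasks' TLB entries when the ASID allocator is in use.]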