Message-ID: <CAAhSdy1Qxwn=Eax6ECEkdRe=mQeL+J0TPCpOc1LMbN1y8-p4UQ@mail.gmail.com>
Date:   Sun, 21 Aug 2022 10:35:45 +0530
From:   Anup Patel <anup@...infault.org>
To:     Jinyu Tang <tjytimi@....com>
Cc:     paul.walmsley@...ive.com, palmer@...belt.com,
        aou@...s.berkeley.edu, alexandre.ghiti@...onical.com,
        guoren@...nel.org, akpm@...ux-foundation.org, heiko@...ech.de,
        panqinglin2020@...as.ac.cn, unnanyong@...wei.com,
        tongtiangen@...wei.com, anshuman.khandual@....com,
        atishp@...osinc.com, linux-riscv@...ts.infradead.org,
        linux-kernel@...r.kernel.org, falcon@...ylab.org
Subject: Re: [RFC PATCH v1] riscv: make update_mmu_cache to support asid

On Sun, Aug 21, 2022 at 7:09 AM Jinyu Tang <tjytimi@....com> wrote:
>
> The `update_mmu_cache` function on riscv currently flushes the TLB
> without ASID information, so it flushes entries belonging to other
> tasks' address spaces even when the processor supports ASIDs. Add a
> new function, `flush_tlb_local_one_page`, that flushes a single page
> locally whether or not the processor supports ASIDs, and uses the
> ASID when it is available.
>
> Signed-off-by: Jinyu Tang <tjytimi@....com>

This is already covered by PATCH6 of the IPI improvement series.
https://www.spinics.net/lists/kernel/msg4481428.html

Regards,
Anup

> ---
>  arch/riscv/include/asm/pgtable.h  |  2 +-
>  arch/riscv/include/asm/tlbflush.h |  2 ++
>  arch/riscv/mm/tlbflush.c          | 11 +++++++++++
>  3 files changed, 14 insertions(+), 1 deletion(-)
>
> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> index 7ec936910a96..09ccefa6b6c7 100644
> --- a/arch/riscv/include/asm/pgtable.h
> +++ b/arch/riscv/include/asm/pgtable.h
> @@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
>          * Relying on flush_tlb_fix_spurious_fault would suffice, but
>          * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
>          */
> -       local_flush_tlb_page(address);
> +       flush_tlb_local_one_page(vma, address);
>  }
>
>  static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> index 801019381dea..120aeb1c6ecf 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -30,6 +30,7 @@ static inline void local_flush_tlb_page(unsigned long addr)
>  #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
>  void flush_tlb_all(void);
>  void flush_tlb_mm(struct mm_struct *mm);
> +void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr);
>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
>  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
>                      unsigned long end);
> @@ -42,6 +43,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
>
>  #define flush_tlb_all() local_flush_tlb_all()
>  #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
> +#define flush_tlb_local_one_page(vma, addr) local_flush_tlb_page(addr)
>
>  static inline void flush_tlb_range(struct vm_area_struct *vma,
>                 unsigned long start, unsigned long end)
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index 37ed760d007c..a2634ce55626 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -64,6 +64,17 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
>         put_cpu();
>  }
>
> +void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr)
> +{
> +       if (static_branch_unlikely(&use_asid_allocator)) {
> +               unsigned long asid = atomic_long_read(&vma->vm_mm->context.id);
> +
> +               local_flush_tlb_page_asid(addr, asid);
> +       } else {
> +               local_flush_tlb_page(addr);
> +       }
> +}
> +
>  void flush_tlb_mm(struct mm_struct *mm)
>  {
>         __sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
> --
> 2.30.2
>
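For context, the patch calls local_flush_tlb_page_asid(), which is not part of
the diff above. A minimal sketch of the two local flush helpers it relies on,
assuming the SFENCE.VMA-based definitions present in arch/riscv around that
time (local_flush_tlb_page in asm/tlbflush.h and a local_flush_tlb_page_asid
helper in mm/tlbflush.c), might look like this:

/*
 * Sketch of the local TLB flush helpers assumed by the patch; not part of
 * the diff above.
 */

/* Flush one page for all address spaces: SFENCE.VMA with rs1=addr, rs2=x0. */
static inline void local_flush_tlb_page(unsigned long addr)
{
	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
}

/*
 * Flush one page for a single address space: SFENCE.VMA with rs1=addr and
 * rs2=asid, so translations tagged with other ASIDs stay in the TLB.
 */
static inline void local_flush_tlb_page_asid(unsigned long addr,
					     unsigned long asid)
{
	__asm__ __volatile__ ("sfence.vma %0, %1"
			: : "r" (addr), "r" (asid)
			: "memory");
}

The ASID-qualified form is what lets update_mmu_cache() avoid evicting other
tasks' translations when the ASID allocator is in use.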
