Message-ID: <CAHVXubiVH=q9pnTLQyjS3X3W-hvuA=ZMM2D2xYPFkGjFnAgbWg@mail.gmail.com>
Date: Wed, 13 Sep 2023 10:04:27 +0200
From: Alexandre Ghiti <alexghiti@...osinc.com>
To: Will Deacon <will@...nel.org>,
"Aneesh Kumar K . V" <aneesh.kumar@...ux.ibm.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Nick Piggin <npiggin@...il.com>,
Peter Zijlstra <peterz@...radead.org>,
Mayuresh Chitale <mchitale@...tanamicro.com>,
Vincent Chen <vincent.chen@...ive.com>,
Paul Walmsley <paul.walmsley@...ive.com>,
Palmer Dabbelt <palmer@...belt.com>,
Albert Ou <aou@...s.berkeley.edu>, linux-arch@...r.kernel.org,
linux-mm@...ck.org, linux-riscv@...ts.infradead.org,
linux-kernel@...r.kernel.org, Samuel Holland <samuel@...lland.org>,
Lad Prabhakar <prabhakar.csengg@...il.com>
Cc: Andrew Jones <ajones@...tanamicro.com>
Subject: Re: [PATCH v4 4/4] riscv: Improve flush_tlb_kernel_range()
@Lad, Prabhakar Any chance you could give this new patchset a try? So
that we can make sure Samuel really found your issue :)
On Mon, Sep 11, 2023 at 3:16 PM Alexandre Ghiti <alexghiti@...osinc.com> wrote:
>
> This function used to simply flush the whole TLB of all harts: be more
> subtle and try to only flush the range.
>
> The problem is that we can only use PAGE_SIZE as the stride since we
> don't know the size of the underlying mapping, so this function is only
> an improvement when the size of the region to flush is
> < threshold * PAGE_SIZE.
>
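(For context on the trade-off described above: with a PAGE_SIZE stride,
flushing page-by-page only beats a full TLB flush for small regions.
The shape of the check is roughly the following -- a sketch only, the
names here are illustrative and the real test lives next to
__flush_tlb_range():)

static unsigned long tlb_flush_all_threshold = 64;

/* Worth flushing page-by-page only when the range is small enough */
static bool range_flush_is_worthwhile(unsigned long size,
				      unsigned long stride)
{
	unsigned long nr_ptes = size / stride;

	return nr_ptes <= tlb_flush_all_threshold;
}
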
> Signed-off-by: Alexandre Ghiti <alexghiti@...osinc.com>
> Reviewed-by: Andrew Jones <ajones@...tanamicro.com>
> ---
> arch/riscv/include/asm/tlbflush.h | 11 ++++++-----
> arch/riscv/mm/tlbflush.c | 33 ++++++++++++++++++++++---------
> 2 files changed, 30 insertions(+), 14 deletions(-)
>
> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> index 170a49c531c6..8f3418c5f172 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -40,6 +40,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
> void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
> void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
> unsigned long end);
> +void flush_tlb_kernel_range(unsigned long start, unsigned long end);
> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
> void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
> @@ -56,15 +57,15 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
> local_flush_tlb_all();
> }
>
> -#define flush_tlb_mm(mm) flush_tlb_all()
> -#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
> -#endif /* !CONFIG_SMP || !CONFIG_MMU */
> -
> /* Flush a range of kernel pages */
> static inline void flush_tlb_kernel_range(unsigned long start,
> unsigned long end)
> {
> - flush_tlb_all();
> + local_flush_tlb_all();
> }
>
> +#define flush_tlb_mm(mm) flush_tlb_all()
> +#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
> +#endif /* !CONFIG_SMP || !CONFIG_MMU */
> +
> #endif /* _ASM_RISCV_TLBFLUSH_H */
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index 2c1136d73411..28cd8539b575 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -97,19 +97,27 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
> unsigned long size, unsigned long stride)
> {
> struct flush_tlb_range_data ftd;
> - struct cpumask *cmask = mm_cpumask(mm);
> + struct cpumask *cmask, full_cmask;
> unsigned long asid = FLUSH_TLB_NO_ASID;
> - unsigned int cpuid;
> bool broadcast;
>
> - if (cpumask_empty(cmask))
> - return;
> + if (mm) {
> + unsigned int cpuid;
> +
> + cmask = mm_cpumask(mm);
> + if (cpumask_empty(cmask))
> + return;
>
> - cpuid = get_cpu();
> - /* check if the tlbflush needs to be sent to other CPUs */
> - broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
> + cpuid = get_cpu();
> + /* check if the tlbflush needs to be sent to other CPUs */
> + broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
> + } else {
> + cpumask_setall(&full_cmask);
> + cmask = &full_cmask;
> + broadcast = true;
> + }
>
> - if (static_branch_unlikely(&use_asid_allocator))
> + if (static_branch_unlikely(&use_asid_allocator) && mm)
> asid = atomic_long_read(&mm->context.id) & asid_mask;
>
> if (broadcast) {
> @@ -128,7 +136,8 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
> local_flush_tlb_range_asid(start, size, stride, asid);
> }
>
> - put_cpu();
> + if (mm)
> + put_cpu();
> }
>
> void flush_tlb_mm(struct mm_struct *mm)
> @@ -189,6 +198,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
>
> __flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
> }
> +
> +void flush_tlb_kernel_range(unsigned long start, unsigned long end)
> +{
> + __flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
> +}
> +
> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
> unsigned long end)
> --
> 2.39.2
>
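FWIW, for anyone testing: vfree() of a vmalloc'ed region is an easy way
to exercise this new path, since the vmap teardown ends up calling
flush_tlb_kernel_range() on the freed range (possibly deferred through
the lazy vmap purge). A minimal test module sketch -- the module
boilerplate is illustrative, not part of the patch:

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static int __init tlb_range_test_init(void)
{
	/* Small region, so the flush stays below threshold * PAGE_SIZE */
	void *p = vmalloc(16 * PAGE_SIZE);

	if (!p)
		return -ENOMEM;

	memset(p, 0, 16 * PAGE_SIZE);	/* touch the mapping */

	/* Unmaps the range; flush_tlb_kernel_range() covers it */
	vfree(p);

	return 0;
}

static void __exit tlb_range_test_exit(void)
{
}

module_init(tlb_range_test_init);
module_exit(tlb_range_test_exit);
MODULE_LICENSE("GPL");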