Message-ID: <d54e8129-b2dd-480e-b665-91a7f6a20cab@arm.com>
Date: Tue, 10 Jun 2025 11:52:20 +0530
From: Anshuman Khandual <anshuman.khandual@....com>
To: Ryan Roberts <ryan.roberts@....com>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Catalin Marinas <catalin.marinas@....com>, Will Deacon <will@...nel.org>,
	Paul Walmsley <paul.walmsley@...ive.com>,
	Palmer Dabbelt <palmer@...belt.com>, Albert Ou <aou@...s.berkeley.edu>,
	Alexandre Ghiti <alex@...ti.fr>, Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
	Dave Hansen <dave.hansen@...ux.intel.com>, "H. Peter Anvin" <hpa@...or.com>,
	David Hildenbrand <david@...hat.com>,
	Lorenzo Stoakes <lorenzo.stoakes@...cle.com>, Rik van Riel <riel@...riel.com>,
	"Liam R. Howlett" <Liam.Howlett@...cle.com>,
	Vlastimil Babka <vbabka@...e.cz>, Harry Yoo <harry.yoo@...cle.com>
Cc: linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-riscv@...ts.infradead.org, linux-mm@...ck.org
Subject: Re: [PATCH v1] mm: Remove arch_flush_tlb_batched_pending() arch helper

On 09/06/25 4:01 PM, Ryan Roberts wrote:
> Since commit 4b634918384c ("arm64/mm: Close theoretical race where stale
> TLB entry remains valid"), all arches that use tlbbatch for reclaim
> (arm64, riscv, x86) implement arch_flush_tlb_batched_pending() with a
> flush_tlb_mm().
>
> So let's simplify by removing the unnecessary abstraction and doing the
> flush_tlb_mm() directly in flush_tlb_batched_pending(). This effectively
> reverts commit db6c1f6f236d ("mm/tlbbatch: introduce
> arch_flush_tlb_batched_pending()").
>
> Suggested-by: Will Deacon <will@...nel.org>
> Signed-off-by: Ryan Roberts <ryan.roberts@....com>

Makes sense to replace the arch callbacks with a common generic
function applicable to all platforms.

Reviewed-by: Anshuman Khandual <anshuman.khandual@....com>
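
For reference, here is roughly what the generic path in mm/rmap.c ends
up looking like once the hunk below is applied. This is a sketch, not a
quote from the tree: the flush_tlb_mm() call and the comment are taken
from the hunk, while the surrounding atomic bookkeeping is reconstructed
from memory and may differ slightly in detail:

/* mm/rmap.c -- sketch of flush_tlb_batched_pending() after this patch */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	int batch = atomic_read(&mm->tlb_flush_batched);
	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		/* Flush the whole mm directly; no per-arch indirection. */
		flush_tlb_mm(mm);
		/*
		 * If new TLB flushing became pending during the flush
		 * above, leave mm->tlb_flush_batched as is, to avoid
		 * losing that flush.
		 */
		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
	}
}
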
> ---
> arch/arm64/include/asm/tlbflush.h | 11 -----------
> arch/riscv/include/asm/tlbflush.h | 1 -
> arch/riscv/mm/tlbflush.c | 5 -----
> arch/x86/include/asm/tlbflush.h | 5 -----
> mm/rmap.c | 2 +-
> 5 files changed, 1 insertion(+), 23 deletions(-)
>
> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> index aa9efee17277..18a5dc0c9a54 100644
> --- a/arch/arm64/include/asm/tlbflush.h
> +++ b/arch/arm64/include/asm/tlbflush.h
> @@ -322,17 +322,6 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
> return true;
> }
>
> -/*
> - * If mprotect/munmap/etc occurs during TLB batched flushing, we need to ensure
> - * all the previously issued TLBIs targeting mm have completed. But since we
> - * can be executing on a remote CPU, a DSB cannot guarantee this like it can
> - * for arch_tlbbatch_flush(). Our only option is to flush the entire mm.
> - */
> -static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
> -{
> - flush_tlb_mm(mm);
> -}
> -
> /*
> * To support TLB batched flush for multiple pages unmapping, we only send
> * the TLBI for each page in arch_tlbbatch_add_pending() and wait for the
> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> index 1a20dd746a49..eed0abc40514 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -63,7 +63,6 @@ void flush_pud_tlb_range(struct vm_area_struct *vma, unsigned long start,
> bool arch_tlbbatch_should_defer(struct mm_struct *mm);
> void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
> struct mm_struct *mm, unsigned long start, unsigned long end);
> -void arch_flush_tlb_batched_pending(struct mm_struct *mm);
> void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
>
> extern unsigned long tlb_flush_all_threshold;
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index e737ba7949b1..8404530ec00f 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -234,11 +234,6 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
> mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
> }
>
> -void arch_flush_tlb_batched_pending(struct mm_struct *mm)
> -{
> - flush_tlb_mm(mm);
> -}
> -
> void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
> {
> __flush_tlb_range(NULL, &batch->cpumask,
> diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
> index e9b81876ebe4..00daedfefc1b 100644
> --- a/arch/x86/include/asm/tlbflush.h
> +++ b/arch/x86/include/asm/tlbflush.h
> @@ -356,11 +356,6 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
> mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
> }
>
> -static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
> -{
> - flush_tlb_mm(mm);
> -}
> -
> extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
>
> static inline bool pte_flags_need_flush(unsigned long oldflags,
> diff --git a/mm/rmap.c b/mm/rmap.c
> index fb63d9256f09..fd160ddaa980 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -746,7 +746,7 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
> int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
>
> if (pending != flushed) {
> - arch_flush_tlb_batched_pending(mm);
> + flush_tlb_mm(mm);
> /*
> * If the new TLB flushing is pending during flushing, leave
> * mm->tlb_flush_batched as is, to avoid losing flushing.
> --
> 2.43.0
>
>