Message-ID: <aVtNXhd1VRfktfAx@a079125.arm.com>
Date: Mon, 5 Jan 2026 11:04:22 +0530
From: Linu Cherian <linu.cherian@....com>
To: Ryan Roberts <ryan.roberts@....com>
Cc: Will Deacon <will@...nel.org>, Ard Biesheuvel <ardb@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
Mark Rutland <mark.rutland@....com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Oliver Upton <oliver.upton@...ux.dev>,
Marc Zyngier <maz@...nel.org>, Dev Jain <dev.jain@....com>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v1 03/13] arm64: mm: Implicitly invalidate user ASID
based on TLBI operation
Hi Ryan,
On Tue, Dec 16, 2025 at 02:45:48PM +0000, Ryan Roberts wrote:
> When kpti is enabled, separate ASIDs are used for userspace and
> kernelspace, requiring ASID-qualified TLB invalidation by virtual
> address to invalidate both of them.
>
> Push the logic for invalidating the two ASIDs down into the low-level
> tlbi-op-specific functions and remove the burden from the caller to
> handle the kpti-specific behaviour.
>
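For anyone reading along without the tree handy: the "implicit" part
leans on the existing __tlbi_user() helper, which only issues the extra
TLBI when kpti is live. From memory it looks roughly like the below in
tlbflush.h (please check the tree for the exact form), so the new calls
in vae1is() and friends are no-ops unless the kernel is actually
unmapped at EL0:

	#define __tlbi_user(op, arg) do {			\
		if (arm64_kernel_unmapped_at_el0())		\
			__tlbi(op, (arg) | USER_ASID_FLAG);	\
	} while (0)
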
> Co-developed-by: Will Deacon <will@...nel.org>
> Signed-off-by: Will Deacon <will@...nel.org>
> Signed-off-by: Ryan Roberts <ryan.roberts@....com>
> ---
> arch/arm64/include/asm/tlbflush.h | 27 ++++++++++-----------------
> 1 file changed, 10 insertions(+), 17 deletions(-)
>
> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> index c5111d2afc66..31f43d953ce2 100644
> --- a/arch/arm64/include/asm/tlbflush.h
> +++ b/arch/arm64/include/asm/tlbflush.h
> @@ -110,6 +110,7 @@ typedef void (*tlbi_op)(u64 arg);
> static __always_inline void vae1is(u64 arg)
> {
> __tlbi(vae1is, arg);
> + __tlbi_user(vae1is, arg);
> }
>
> static __always_inline void vae2is(u64 arg)
> @@ -126,6 +127,7 @@ static __always_inline void vale1(u64 arg)
> static __always_inline void vale1is(u64 arg)
> {
> __tlbi(vale1is, arg);
> + __tlbi_user(vale1is, arg);
> }
>
> static __always_inline void vale2is(u64 arg)
> @@ -162,11 +164,6 @@ static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
> op(arg);
> }
>
> -#define __tlbi_user_level(op, arg, level) do { \
> - if (arm64_kernel_unmapped_at_el0()) \
> - __tlbi_level(op, (arg | USER_ASID_FLAG), level); \
> -} while (0)
> -
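Nice cleanup. For reference, every call site previously had to pair the
plain op with the user variant by hand, i.e. roughly:

	/* before: kpti handled at each call site */
	__tlbi_level(op, addr, tlb_level);
	__tlbi_user_level(op, addr, tlb_level);

	/* after: the op itself (e.g. vae1is()) issues both TLBIs */
	__tlbi_level(op, addr, tlb_level);

which is exactly the duplication the hunks below remove.
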
> /*
> * This macro creates a properly formatted VA operand for the TLB RANGE. The
> * value bit assignments are:
> @@ -435,8 +432,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
> * @stride: Flush granularity
> * @asid: The ASID of the task (0 for IPA instructions)
> * @tlb_level: Translation Table level hint, if known
> - * @tlbi_user: If 'true', call an additional __tlbi_user()
> - * (typically for user ASIDs). 'flase' for IPA instructions
> * @lpa2: If 'true', the lpa2 scheme is used as set out below
> *
> * When the CPU does not support TLB range operations, flush the TLB
> @@ -462,6 +457,7 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
> static __always_inline void rvae1is(u64 arg)
> {
> __tlbi(rvae1is, arg);
> + __tlbi_user(rvae1is, arg);
> }
>
> static __always_inline void rvale1(u64 arg)
> @@ -473,6 +469,7 @@ static __always_inline void rvale1(u64 arg)
> static __always_inline void rvale1is(u64 arg)
> {
> __tlbi(rvale1is, arg);
> + __tlbi_user(rvale1is, arg);
> }
>
> static __always_inline void rvaale1is(u64 arg)
> @@ -491,7 +488,7 @@ static __always_inline void __tlbi_range(tlbi_op op, u64 arg)
> }
>
> #define __flush_tlb_range_op(op, start, pages, stride, \
> - asid, tlb_level, tlbi_user, lpa2) \
> + asid, tlb_level, lpa2) \
> do { \
> typeof(start) __flush_start = start; \
> typeof(pages) __flush_pages = pages; \
> @@ -506,8 +503,6 @@ do { \
> (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \
> addr = __TLBI_VADDR(__flush_start, asid); \
> __tlbi_level(op, addr, tlb_level); \
> - if (tlbi_user) \
> - __tlbi_user_level(op, addr, tlb_level); \
> __flush_start += stride; \
> __flush_pages -= stride >> PAGE_SHIFT; \
> continue; \
> @@ -518,8 +513,6 @@ do { \
> addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
> scale, num, tlb_level); \
> __tlbi_range(r##op, addr); \
> - if (tlbi_user) \
> - __tlbi_user(r##op, addr); \
> __flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
> __flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
> } \
> @@ -528,7 +521,7 @@ do { \
> } while (0)
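As an aside for anyone sanity-checking the loop bounds: assuming
__TLBI_RANGE_PAGES() is still ((num + 1) << (5 * scale + 1)), a single
range op covers between 2 pages (scale 0, num 0) and 2^21 pages
(scale 3, num 31). A throwaway userspace check of that arithmetic:

	/* standalone sketch, not kernel code */
	#include <stdio.h>

	/* assumed to match the kernel's __TLBI_RANGE_PAGES() */
	#define TLBI_RANGE_PAGES(num, scale) \
		((unsigned long)((num) + 1) << (5 * (scale) + 1))

	int main(void)
	{
		printf("%lu\n", TLBI_RANGE_PAGES(0, 0));   /* 2       */
		printf("%lu\n", TLBI_RANGE_PAGES(31, 3));  /* 2097152 */
		return 0;
	}
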
>
> #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
> - __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
> + __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, kvm_lpa2_is_enabled());
>
> static inline bool __flush_tlb_range_limit_excess(unsigned long start,
> unsigned long end, unsigned long pages, unsigned long stride)
> @@ -568,10 +561,10 @@ static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
>
> if (last_level)
> __flush_tlb_range_op(vale1is, start, pages, stride, asid,
> - tlb_level, true, lpa2_is_enabled());
> + tlb_level, lpa2_is_enabled());
> else
> __flush_tlb_range_op(vae1is, start, pages, stride, asid,
> - tlb_level, true, lpa2_is_enabled());
> + tlb_level, lpa2_is_enabled());
>
> mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
> }
> @@ -630,7 +623,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
>
> dsb(ishst);
> __flush_tlb_range_op(vaale1is, start, pages, stride, 0,
> - TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
> + TLBI_TTL_UNKNOWN, lpa2_is_enabled());
> dsb(ish);
> isb();
The __flush_tlb_range_op() call in local_flush_tlb_contpte() has been
missed out.
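I'd expect the fix there to mirror the other call sites, i.e. just drop
the tlbi_user argument. Untested sketch; I haven't checked the exact
arguments local_flush_tlb_contpte() passes, so treat the names below as
placeholders:

	-	__flush_tlb_range_op(op, start, pages, stride, asid,
	-			     tlb_level, true, lpa2_is_enabled());
	+	__flush_tlb_range_op(op, start, pages, stride, asid,
	+			     tlb_level, lpa2_is_enabled());

Unless a later patch in the series touches it, I'd expect the build to
break there as-is, since the macro arity changed.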
--
Linu Cherian