Message-ID: <aUOnxYvmXwOJL4yU@a079125.arm.com>
Date: Thu, 18 Dec 2025 12:35:41 +0530
From: Linu Cherian <linu.cherian@....com>
To: Ryan Roberts <ryan.roberts@....com>
Cc: Will Deacon <will@...nel.org>, Ard Biesheuvel <ardb@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
Mark Rutland <mark.rutland@....com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Oliver Upton <oliver.upton@...ux.dev>,
Marc Zyngier <maz@...nel.org>, Dev Jain <dev.jain@....com>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v1 03/13] arm64: mm: Implicitly invalidate user ASID
 based on TLBI operation

On Thu, Dec 18, 2025 at 12:00:57PM +0530, Linu Cherian wrote:
> Ryan,
>
> On Tue, Dec 16, 2025 at 02:45:48PM +0000, Ryan Roberts wrote:
> > When kpti is enabled, separate ASIDs are used for userspace and
> > kernelspace, requiring ASID-qualified TLB invalidation by virtual
> > address to invalidate both of them.
> >
> > Push the logic for invalidating the two ASIDs down into the low-level
> > tlbi-op-specific functions and remove the burden from the caller to
> > handle the kpti-specific behaviour.
> >
> > Co-developed-by: Will Deacon <will@...nel.org>
> > Signed-off-by: Will Deacon <will@...nel.org>
> > Signed-off-by: Ryan Roberts <ryan.roberts@....com>
> > ---
> > arch/arm64/include/asm/tlbflush.h | 27 ++++++++++-----------------
> > 1 file changed, 10 insertions(+), 17 deletions(-)
> >
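
(Side note for anyone reading along: __tlbi_user() itself is not visible
in the hunks below. A minimal sketch of what it is assumed to expand to,
mirroring the __tlbi_user_level() macro this patch removes; the real
definition lives earlier in tlbflush.h:

	#define __tlbi_user(op, arg) do {				\
		if (arm64_kernel_unmapped_at_el0())		\
			__tlbi(op, (arg) | USER_ASID_FLAG);	\
	} while (0)

i.e. each low-level helper now issues the TLBI against the kernel ASID
unconditionally, and repeats it for the user ASID only when kpti has
split the two ASIDs.)
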
> > diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> > index c5111d2afc66..31f43d953ce2 100644
> > --- a/arch/arm64/include/asm/tlbflush.h
> > +++ b/arch/arm64/include/asm/tlbflush.h
> > @@ -110,6 +110,7 @@ typedef void (*tlbi_op)(u64 arg);
> > static __always_inline void vae1is(u64 arg)
> > {
> > __tlbi(vae1is, arg);
> > + __tlbi_user(vae1is, arg);
> > }
> >
> > static __always_inline void vae2is(u64 arg)
> > @@ -126,6 +127,7 @@ static __always_inline void vale1(u64 arg)
> > static __always_inline void vale1is(u64 arg)
> > {
> > __tlbi(vale1is, arg);
> > + __tlbi_user(vale1is, arg);
> > }
> >
> > static __always_inline void vale2is(u64 arg)
> > @@ -162,11 +164,6 @@ static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
> > op(arg);
> > }
> >
> > -#define __tlbi_user_level(op, arg, level) do { \
> > - if (arm64_kernel_unmapped_at_el0()) \
> > - __tlbi_level(op, (arg | USER_ASID_FLAG), level); \
> > -} while (0)
> > -
> > /*
> > * This macro creates a properly formatted VA operand for the TLB RANGE. The
> > * value bit assignments are:
> > @@ -435,8 +432,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
> > * @stride: Flush granularity
> > * @asid: The ASID of the task (0 for IPA instructions)
> > * @tlb_level: Translation Table level hint, if known
> > - * @tlbi_user: If 'true', call an additional __tlbi_user()
> > - * (typically for user ASIDs). 'flase' for IPA instructions
> > * @lpa2: If 'true', the lpa2 scheme is used as set out below
> > *
> > * When the CPU does not support TLB range operations, flush the TLB
> > @@ -462,6 +457,7 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
> > static __always_inline void rvae1is(u64 arg)
> > {
> > __tlbi(rvae1is, arg);
> > + __tlbi_user(rvae1is, arg);
> > }
> >
> > static __always_inline void rvale1(u64 arg)
> > @@ -473,6 +469,7 @@ static __always_inline void rvale1(u64 arg)
> > static __always_inline void rvale1is(u64 arg)
> > {
> > __tlbi(rvale1is, arg);
> > + __tlbi_user(rvale1is, arg);
> > }
> >
> > static __always_inline void rvaale1is(u64 arg)
> > @@ -491,7 +488,7 @@ static __always_inline void __tlbi_range(tlbi_op op, u64 arg)
> > }
> >
> > #define __flush_tlb_range_op(op, start, pages, stride, \
> > - asid, tlb_level, tlbi_user, lpa2) \
> > + asid, tlb_level, lpa2) \
> > do { \
> > typeof(start) __flush_start = start; \
> > typeof(pages) __flush_pages = pages; \
> > @@ -506,8 +503,6 @@ do { \
> > (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \
> > addr = __TLBI_VADDR(__flush_start, asid); \
> > __tlbi_level(op, addr, tlb_level); \
> > - if (tlbi_user) \
> > - __tlbi_user_level(op, addr, tlb_level); \
> > __flush_start += stride; \
> > __flush_pages -= stride >> PAGE_SHIFT; \
> > continue; \
> > @@ -518,8 +513,6 @@ do { \
> > addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
> > scale, num, tlb_level); \
> > __tlbi_range(r##op, addr); \
> > - if (tlbi_user) \
> > - __tlbi_user(r##op, addr); \
> > __flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
> > __flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
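
(To illustrate the simplification at the call sites: with the tlbi_user
parameter dropped, a caller such as __flush_tlb_range_nosync() would now
invoke the helper roughly as below; this is sketched from memory and the
exact arguments may differ in this series:

	__flush_tlb_range_op(vale1is, start, pages, stride, asid,
			     tlb_level, lpa2_is_enabled());

with the user-ASID invalidation now happening implicitly inside
vale1is()/rvale1is() whenever kpti is active.)
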
>
>
> There are more __tlbi_user invocations in this file, in __flush_tlb_mm,
> __local_flush_tlb_page_nonotify_nosync and __flush_tlb_page_nosync.
> Shouldn't those be addressed as part of this patch as well?
>
I see that, except for __flush_tlb_mm, the others are addressed in
subsequent patches of this series. Should that be hinted at in the
commit message?
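
For reference, this is the kind of call site I mean, sketched from
memory of the current tlbflush.h (names and details may not match this
series exactly):

	static inline void __flush_tlb_mm(struct mm_struct *mm)
	{
		unsigned long asid = __TLBI_VADDR(0, ASID(mm));

		dsb(ishst);
		__tlbi(aside1is, asid);
		/* user-ASID flush still open-coded at the call site */
		__tlbi_user(aside1is, asid);
		dsb(ish);
	}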