Message-ID: <0fdb0dfc-d558-41f8-9c8d-d179892f35a9@arm.com>
Date: Mon, 5 Jan 2026 17:09:51 +0000
From: Ryan Roberts <ryan.roberts@....com>
To: Linu Cherian <linu.cherian@....com>
Cc: Will Deacon <will@...nel.org>, Ard Biesheuvel <ardb@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
Mark Rutland <mark.rutland@....com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Oliver Upton <oliver.upton@...ux.dev>, Marc Zyngier <maz@...nel.org>,
Dev Jain <dev.jain@....com>, linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v1 01/13] arm64: mm: Re-implement the __tlbi_level macro
 as a C function

On 05/01/2026 05:30, Linu Cherian wrote:
> Ryan,
>
> On Tue, Dec 16, 2025 at 02:45:46PM +0000, Ryan Roberts wrote:
>> As part of efforts to reduce our reliance on complex preprocessor macros
>> for TLB invalidation routines, convert the __tlbi_level macro to a C
>> function for by-level TLB invalidation.
>>
>> Each specific tlbi level op is implemented as a C function and the
>> appropriate function pointer is passed to __tlbi_level(). Since
>> everything is declared inline and is statically resolvable, the compiler
>> will convert the indirect function call into a direct, inlined call.
>>
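
To spell out the inlining claim, here is a throwaway userspace analogue of
the pattern (not kernel code; op_a() and do_op() are made-up stand-ins for
the tlbi helpers and __tlbi_level()):

#include <stdint.h>
#include <stdio.h>

typedef void (*op_fn)(uint64_t arg);

/* Stand-in for vae1is()/vale1is()/...: a trivial always_inline wrapper. */
static inline __attribute__((always_inline)) void op_a(uint64_t arg)
{
	printf("op_a(0x%llx)\n", (unsigned long long)arg);
}

/* Stand-in for __tlbi_level(): takes the op as a function pointer. */
static inline __attribute__((always_inline)) void do_op(op_fn op, uint64_t arg)
{
	op(arg);
}

int main(void)
{
	/*
	 * The pointer argument is a compile-time constant, so with
	 * optimisations enabled the compiler folds this into a direct,
	 * inlined call to op_a(); no indirect branch survives in the
	 * generated code.
	 */
	do_op(op_a, 0x1000);
	return 0;
}

Compiled at -O2, the call through the op_fn pointer is resolved and inlined,
which is the same property the kernel version relies on.
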
>> Suggested-by: Linus Torvalds <torvalds@...ux-foundation.org>
>> Signed-off-by: Ryan Roberts <ryan.roberts@....com>
>> ---
>> arch/arm64/include/asm/tlbflush.h | 69 +++++++++++++++++++++++++------
>> 1 file changed, 56 insertions(+), 13 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
>> index a2d65d7d6aae..13a59cf28943 100644
>> --- a/arch/arm64/include/asm/tlbflush.h
>> +++ b/arch/arm64/include/asm/tlbflush.h
>> @@ -105,19 +105,62 @@ static inline unsigned long get_trans_granule(void)
>>
>> #define TLBI_TTL_UNKNOWN INT_MAX
>>
>> -#define __tlbi_level(op, addr, level) do { \
>> - u64 arg = addr; \
>> - \
>> - if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) && \
>> - level >= 0 && level <= 3) { \
>> - u64 ttl = level & 3; \
>> - ttl |= get_trans_granule() << 2; \
>> - arg &= ~TLBI_TTL_MASK; \
>> - arg |= FIELD_PREP(TLBI_TTL_MASK, ttl); \
>> - } \
>> - \
>> - __tlbi(op, arg); \
>> -} while(0)
>> +typedef void (*tlbi_op)(u64 arg);
>> +
>> +static __always_inline void vae1is(u64 arg)
>> +{
>> + __tlbi(vae1is, arg);
>> +}
>> +
>> +static __always_inline void vae2is(u64 arg)
>> +{
>> + __tlbi(vae2is, arg);
>> +}
>> +
>> +static __always_inline void vale1(u64 arg)
>> +{
>> + __tlbi(vale1, arg);
>> + __tlbi_user(vale1, arg);
>
> Should the __tlbi_user addition be added as part of patch 3?
Yes, it should; good spot!
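
To make that concrete, vale1() in this patch would presumably end up as just
the plain wrapper, matching the other helpers (sketch only):

static __always_inline void vale1(u64 arg)
{
	__tlbi(vale1, arg);
}

with the __tlbi_user(vale1, arg) line deferred to patch 3.
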
>
>> +}
>> +
>> +static __always_inline void vale1is(u64 arg)
>> +{
>> + __tlbi(vale1is, arg);
>> +}
>> +
>> +static __always_inline void vale2is(u64 arg)
>> +{
>> + __tlbi(vale2is, arg);
>> +}
>> +
>> +static __always_inline void vaale1is(u64 arg)
>> +{
>> + __tlbi(vaale1is, arg);
>> +}
>> +
>> +static __always_inline void ipas2e1(u64 arg)
>> +{
>> + __tlbi(ipas2e1, arg);
>> +}
>> +
>> +static __always_inline void ipas2e1is(u64 arg)
>> +{
>> + __tlbi(ipas2e1is, arg);
>> +}
>> +
>> +static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
>> +{
>> + u64 arg = addr;
>> +
>> + if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) && level <= 3) {
>> + u64 ttl = level | (get_trans_granule() << 2);
>> +
>> + arg &= ~TLBI_TTL_MASK;
>> + arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);
>> + }
>> +
>> + op(arg);
>> +}
>>
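For the record, the TTL hint works out the same as the old macro: e.g. with
4K pages, where get_trans_granule() returns 1, a level-3 invalidate gives

	ttl = 3 | (1 << 2) = 0b0111

which FIELD_PREP() then places in the TTL bits of the TLBI argument, exactly
as before.
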
>> #define __tlbi_user_level(op, arg, level) do { \
>> if (arm64_kernel_unmapped_at_el0()) \
>> --
>> 2.43.0
>>