Message-Id: <20250711161732.384-8-will@kernel.org>
Date: Fri, 11 Jul 2025 17:17:29 +0100
From: Will Deacon <will@...nel.org>
To: linux-arm-kernel@...ts.infradead.org
Cc: linux-kernel@...r.kernel.org,
Will Deacon <will@...nel.org>,
Ard Biesheuvel <ardb@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
Ryan Roberts <ryan.roberts@....com>,
Mark Rutland <mark.rutland@....com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Oliver Upton <oliver.upton@...ux.dev>,
Marc Zyngier <maz@...nel.org>
Subject: [PATCH 07/10] arm64: mm: Push __TLBI_VADDR() into __tlbi_level()

The __TLBI_VADDR() macro takes an ASID and an address and converts them
into a single argument formatted correctly for a TLB invalidation
instruction.
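For reference, the existing macro looks roughly like this (a sketch of the
current arch/arm64/include/asm/tlbflush.h definition: the address is shifted
down and masked into the low bits of the operand, and the ASID lands in bits
[63:48]):

  #define __TLBI_VADDR(addr, asid)			\
  ({							\
	unsigned long __ta = (addr) >> 12;		\
	__ta &= GENMASK_ULL(43, 0);			\
	__ta |= (unsigned long)(asid) << 48;		\
	__ta;						\
  })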
Rather than have callers worry about this (especially in the case where
the ASID is zero), push the macro down into __tlbi_level() via a new
__tlbi_level_asid() helper.
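The effect at a call site is illustrated below (hypothetical before/after,
mirroring the range-walk hunk in arch/arm64/include/asm/tlbflush.h further
down in this patch):

  /* before: the caller formats the TLBI operand itself */
  addr = __TLBI_VADDR(start, asid);
  __tlbi_level(op, addr, tlb_level);

  /* after: the helper builds the operand internally */
  __tlbi_level_asid(op, start, tlb_level, asid);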
Signed-off-by: Will Deacon <will@...nel.org>
---
arch/arm64/include/asm/tlbflush.h | 14 ++++++++++----
arch/arm64/kernel/sys_compat.c | 2 +-
arch/arm64/kvm/hyp/nvhe/mm.c | 2 +-
arch/arm64/kvm/hyp/pgtable.c | 4 ++--
4 files changed, 14 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index a8d21e52ef3a..434b9fdb340a 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -128,9 +128,10 @@ enum tlbi_op {
___GEN_TLBI_OP_CASE(op); \
break
-static __always_inline void __tlbi_level(const enum tlbi_op op, u64 addr, u32 level)
+static __always_inline void __tlbi_level_asid(const enum tlbi_op op, u64 addr,
+ u32 level, u16 asid)
{
- u64 arg = addr;
+ u64 arg = __TLBI_VADDR(addr, asid);
if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) && level <= 3) {
u64 ttl = level | (get_trans_granule() << 2);
@@ -154,6 +155,11 @@ static __always_inline void __tlbi_level(const enum tlbi_op op, u64 addr, u32 le
#undef __GEN_TLBI_OP_ASID_CASE
#undef ___GEN_TLBI_OP_CASE
+static inline void __tlbi_level(const enum tlbi_op op, u64 addr, u32 level)
+{
+ __tlbi_level_asid(op, addr, level, 0);
+}
+
/*
* This macro creates a properly formatted VA operand for the TLB RANGE. The
* value bit assignments are:
@@ -449,8 +455,7 @@ do { \
if (!system_supports_tlb_range() || \
__flush_pages == 1 || \
(lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \
- addr = __TLBI_VADDR(__flush_start, asid); \
- __tlbi_level(op, addr, tlb_level); \
+ __tlbi_level_asid(op, __flush_start, tlb_level, asid); \
__flush_start += stride; \
__flush_pages -= stride >> PAGE_SHIFT; \
continue; \
@@ -580,6 +585,7 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
__flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, true, 3);
}
+#undef __TLBI_VADDR
#undef __tlbi_user
#endif
#endif
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 4a609e9b65de..ad4857df4830 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -36,7 +36,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
* The workaround requires an inner-shareable tlbi.
* We pick the reserved-ASID to minimise the impact.
*/
- __tlbi(aside1is, __TLBI_VADDR(0, 0));
+ __tlbi(aside1is, 0UL);
dsb(ish);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index ae8391baebc3..581385b21826 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -270,7 +270,7 @@ static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
* https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
*/
dsb(ishst);
- __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
+ __tlbi_level(vale2is, addr, level);
dsb(ish);
isb();
}
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index c351b4abd5db..540691987e13 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -472,14 +472,14 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
kvm_clear_pte(ctx->ptep);
dsb(ishst);
- __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), TLBI_TTL_UNKNOWN);
+ __tlbi_level(vae2is, ctx->addr, TLBI_TTL_UNKNOWN);
} else {
if (ctx->end - ctx->addr < granule)
return -EINVAL;
kvm_clear_pte(ctx->ptep);
dsb(ishst);
- __tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
+ __tlbi_level(vale2is, ctx->addr, ctx->level);
*unmapped += granule;
}
--
2.50.0.727.gbf7dc18ff4-goog