Message-ID: <20231122010815.3545294-5-samuel.holland@sifive.com>
Date: Tue, 21 Nov 2023 17:07:15 -0800
From: Samuel Holland <samuel.holland@...ive.com>
To: Palmer Dabbelt <palmer@...belt.com>,
linux-riscv@...ts.infradead.org
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
Alexandre Ghiti <alexghiti@...osinc.com>,
Samuel Holland <samuel.holland@...ive.com>
Subject: [PATCH v3 4/8] riscv: mm: Introduce cntx2asid/cntx2version helper macros

When using the ASID allocator, the MM context ID contains two values:
the ASID in the lower bits, and the allocator version number in the
remaining bits. Use macros to make this separation more obvious.
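
For illustration only (this snippet is not part of the patch), here is a
minimal user-space sketch of the split; the 16-bit ASID width and the
file-local asid_mask are assumptions standing in for the mask the kernel
computes at boot:

	/* Hypothetical demo of the context-ID layout, not kernel code. */
	#include <stdio.h>

	static unsigned long asid_mask = (1UL << 16) - 1; /* assumed width */

	#define cntx2asid(cntx)		((cntx) & asid_mask)
	#define cntx2version(cntx)	((cntx) & ~asid_mask)

	int main(void)
	{
		unsigned long cntx = (3UL << 16) | 0x2a; /* version 3, ASID 0x2a */

		printf("asid    = 0x%lx\n", cntx2asid(cntx));    /* 0x2a */
		printf("version = 0x%lx\n", cntx2version(cntx)); /* 0x30000 */
		return 0;
	}

Because the two masks are exact complements, the macros partition the
context ID: cntx2asid(cntx) | cntx2version(cntx) == cntx for any value.
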
Signed-off-by: Samuel Holland <samuel.holland@...ive.com>
---

(no changes since v1)

 arch/riscv/include/asm/mmu.h |  3 +++
 arch/riscv/mm/context.c      | 12 ++++++------
 arch/riscv/mm/tlbflush.c     |  4 ++--
 3 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 355504b37f8e..a550fbf770be 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -26,6 +26,9 @@ typedef struct {
 #endif
 } mm_context_t;
 
+#define cntx2asid(cntx)		((cntx) & asid_mask)
+#define cntx2version(cntx)	((cntx) & ~asid_mask)
+
 void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa,
 			       phys_addr_t sz, pgprot_t prot);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 217fd4de6134..43d005f63253 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -81,7 +81,7 @@ static void __flush_context(void)
 		if (cntx == 0)
 			cntx = per_cpu(reserved_context, i);
 
-		__set_bit(cntx & asid_mask, context_asid_map);
+		__set_bit(cntx2asid(cntx), context_asid_map);
 		per_cpu(reserved_context, i) = cntx;
 	}
 
@@ -102,7 +102,7 @@ static unsigned long __new_context(struct mm_struct *mm)
 	lockdep_assert_held(&context_lock);
 
 	if (cntx != 0) {
-		unsigned long newcntx = ver | (cntx & asid_mask);
+		unsigned long newcntx = ver | cntx2asid(cntx);
 
 		/*
 		 * If our current CONTEXT was active during a rollover, we
@@ -115,7 +115,7 @@ static unsigned long __new_context(struct mm_struct *mm)
 		 * We had a valid CONTEXT in a previous life, so try to
 		 * re-use it if possible.
 		 */
-		if (!__test_and_set_bit(cntx & asid_mask, context_asid_map))
+		if (!__test_and_set_bit(cntx2asid(cntx), context_asid_map))
 			return newcntx;
 	}
 
@@ -168,7 +168,7 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
 	 */
 	old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));
 	if (old_active_cntx &&
-	    ((cntx & ~asid_mask) == atomic_long_read(&current_version)) &&
+	    (cntx2version(cntx) == atomic_long_read(&current_version)) &&
 	    atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),
 					old_active_cntx, cntx))
 		goto switch_mm_fast;
@@ -177,7 +177,7 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
 
 	/* Check that our ASID belongs to the current_version. */
 	cntx = atomic_long_read(&mm->context.id);
-	if ((cntx & ~asid_mask) != atomic_long_read(&current_version)) {
+	if (cntx2version(cntx) != atomic_long_read(&current_version)) {
 		cntx = __new_context(mm);
 		atomic_long_set(&mm->context.id, cntx);
 	}
@@ -191,7 +191,7 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
 
 switch_mm_fast:
 	csr_write(CSR_SATP, virt_to_pfn(mm->pgd) |
-		  ((cntx & asid_mask) << SATP_ASID_SHIFT) |
+		  (cntx2asid(cntx) << SATP_ASID_SHIFT) |
 		  satp_mode);
 
 	if (need_flush_tlb)
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 27b3744b5673..23409d70440f 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -91,7 +91,7 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
 		broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
 
 		if (static_branch_unlikely(&use_asid_allocator))
-			asid = atomic_long_read(&mm->context.id) & asid_mask;
+			asid = cntx2asid(atomic_long_read(&mm->context.id));
 	} else {
 		cmask = cpu_online_mask;
 		broadcast = true;
@@ -123,7 +123,7 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
 	unsigned long asid = FLUSH_TLB_NO_ASID;
 
 	if (mm && static_branch_unlikely(&use_asid_allocator))
-		asid = atomic_long_read(&mm->context.id) & asid_mask;
+		asid = cntx2asid(atomic_long_read(&mm->context.id));
 
 	local_flush_tlb_range_asid(start, size, stride, asid);
 }
--
2.42.0