Message-ID: <20231122010815.3545294-2-samuel.holland@sifive.com>
Date: Tue, 21 Nov 2023 17:07:12 -0800
From: Samuel Holland <samuel.holland@...ive.com>
To: Palmer Dabbelt <palmer@...belt.com>,
linux-riscv@...ts.infradead.org
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
Alexandre Ghiti <alexghiti@...osinc.com>,
Samuel Holland <samuel.holland@...ive.com>
Subject: [PATCH v3 1/8] riscv: mm: Combine the SMP and UP TLB flush code

In SMP configurations, all TLB flushing narrower than flush_tlb_all()
goes through __flush_tlb_range(). Do the same in UP configurations.
This allows UP configurations to take advantage of recent improvements
to the code in tlbflush.c, such as support for huge pages and flushing
multiple-page ranges.
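
For example, before this patch a UP kernel implemented flush_tlb_range()
as a full local flush, discarding the range information (see the removed
hunk below):

	static inline void flush_tlb_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
	{
		local_flush_tlb_all();
	}

With this patch, the same call reaches the shared __flush_tlb_range():
its UP variant (added to tlbflush.c below) looks up the mm's ASID when
the ASID allocator is in use and calls local_flush_tlb_range_asid(),
which issues one sfence.vma per stride within the range, falling back
to a full flush only above a size threshold.
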
Signed-off-by: Samuel Holland <samuel.holland@...ive.com>
---

(no changes since v2)

Changes in v2:
- Move the SMP/UP merge earlier in the series to avoid build issues
- Make a copy of __flush_tlb_range() instead of adding ifdefs inside
  (see the sketch after this list)
- local_flush_tlb_all() is the only function used on !MMU (smpboot.c)
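
For illustration, the rejected alternative would have folded the UP case
into the existing function body. A rough sketch (identifiers taken from
the patch; the #ifdef placement is hypothetical):

	static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
				      unsigned long size, unsigned long stride)
	{
	#ifdef CONFIG_SMP
		/* existing path: compute the cpumask, flush via IPI or SBI */
	#else
		unsigned long asid = FLUSH_TLB_NO_ASID;

		if (mm && static_branch_unlikely(&use_asid_allocator))
			asid = atomic_long_read(&mm->context.id) & asid_mask;

		local_flush_tlb_range_asid(start, size, stride, asid);
	#endif
	}

Keeping two complete copies under a single #ifdef, as done below, leaves
each variant readable as straight-line code.
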
arch/riscv/include/asm/tlbflush.h | 33 +++++++------------------------
arch/riscv/mm/Makefile | 5 +----
arch/riscv/mm/tlbflush.c | 13 ++++++++++++
3 files changed, 21 insertions(+), 30 deletions(-)

diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 8f3418c5f172..317a1811aa51 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -27,13 +27,12 @@ static inline void local_flush_tlb_page(unsigned long addr)
{
ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
}
-#else /* CONFIG_MMU */
-#define local_flush_tlb_all() do { } while (0)
-#define local_flush_tlb_page(addr) do { } while (0)
-#endif /* CONFIG_MMU */

-#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
+#ifdef CONFIG_SMP
void flush_tlb_all(void);
+#else
+#define flush_tlb_all() local_flush_tlb_all()
+#endif
void flush_tlb_mm(struct mm_struct *mm);
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned int page_size);
@@ -46,26 +45,8 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end);
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
#endif
-#else /* CONFIG_SMP && CONFIG_MMU */
-
-#define flush_tlb_all() local_flush_tlb_all()
-#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
-{
-	local_flush_tlb_all();
-}
-
-/* Flush a range of kernel pages */
-static inline void flush_tlb_kernel_range(unsigned long start,
-	unsigned long end)
-{
-	local_flush_tlb_all();
-}
-
-#define flush_tlb_mm(mm) flush_tlb_all()
-#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
-#endif /* !CONFIG_SMP || !CONFIG_MMU */
+#else /* CONFIG_MMU */
+#define local_flush_tlb_all() do { } while (0)
+#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_TLBFLUSH_H */
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 3a4dfc8babcf..96e65c571ce8 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,15 +13,12 @@ endif

KCOV_INSTRUMENT_init.o := n

obj-y += init.o
-obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o
+obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o tlbflush.o
obj-y += cacheflush.o
obj-y += context.o
obj-y += pgtable.o
obj-y += pmem.o
-ifeq ($(CONFIG_MMU),y)
-obj-$(CONFIG_SMP) += tlbflush.o
-endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index e6659d7368b3..22d7ed5abf8e 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -66,6 +66,7 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

+#ifdef CONFIG_SMP
static void __ipi_flush_tlb_all(void *info)
{
local_flush_tlb_all();
@@ -138,6 +139,18 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
if (mm)
put_cpu();
}
+#else
+static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
+			      unsigned long size, unsigned long stride)
+{
+	unsigned long asid = FLUSH_TLB_NO_ASID;
+
+	if (mm && static_branch_unlikely(&use_asid_allocator))
+		asid = atomic_long_read(&mm->context.id) & asid_mask;
+
+	local_flush_tlb_range_asid(start, size, stride, asid);
+}
+#endif

void flush_tlb_mm(struct mm_struct *mm)
{
--
2.42.0