Message-ID: <168095071741.404.18009750298069050198.tip-bot2@tip-bot2>
Date: Sat, 08 Apr 2023 10:45:17 -0000
From: "irqchip-bot for Anup Patel" <tip-bot2@...utronix.de>
To: linux-kernel@...r.kernel.org
Cc: Anup Patel <apatel@...tanamicro.com>,
Atish Patra <atishp@...osinc.com>,
Palmer Dabbelt <palmer@...osinc.com>,
Marc Zyngier <maz@...nel.org>, tglx@...utronix.de
Subject: [irqchip: irq/irqchip-next] RISC-V: Use IPIs for remote TLB flush
when possible

The following commit has been merged into the irq/irqchip-next branch of irqchip:

Commit-ID: 18d2199d81054f44e6d2a51177cc80566f43bf23
Gitweb: https://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms/18d2199d81054f44e6d2a51177cc80566f43bf23
Author: Anup Patel <apatel@...tanamicro.com>
AuthorDate: Tue, 28 Mar 2023 09:22:21 +05:30
Committer: Marc Zyngier <maz@...nel.org>
CommitterDate: Sat, 08 Apr 2023 11:26:24 +01:00

RISC-V: Use IPIs for remote TLB flush when possible

If we have a specialized interrupt controller (such as the AIA IMSIC)
which allows supervisor mode to inject IPIs directly, without any
assistance from M-mode or HS-mode, then we can use it to do remote TLB
flushes directly from supervisor mode instead of going through the SBI
RFENCE calls.

This patch extends the remote TLB flush functions to use supervisor-mode
IPIs whenever direct supervisor-mode IPIs are supported by the interrupt
controller.
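The heart of the change is a two-way dispatch: when the interrupt
controller lets S-mode send IPIs itself, broadcast the flush as an IPI;
otherwise keep going through SBI. The simplest instance is the new
flush_tlb_all() from the diff below, reproduced here as a reading aid:

	/* IPI callback: each CPU flushes its own local TLB. */
	static void __ipi_flush_tlb_all(void *info)
	{
		local_flush_tlb_all();
	}

	void flush_tlb_all(void)
	{
		if (riscv_use_ipi_for_rfence())
			on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
		else
			sbi_remote_sfence_vma(NULL, 0, -1);
	}

The ranged variants follow the same shape, packing asid/start/size/stride
into a struct flush_tlb_range_data and running the matching local flush
on each CPU in the mm's cpumask.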
Signed-off-by: Anup Patel <apatel@...tanamicro.com>
Reviewed-by: Atish Patra <atishp@...osinc.com>
Acked-by: Palmer Dabbelt <palmer@...osinc.com>
Signed-off-by: Marc Zyngier <maz@...nel.org>
Link: https://lore.kernel.org/r/20230328035223.1480939-6-apatel@ventanamicro.com
---
 arch/riscv/mm/tlbflush.c | 93 ++++++++++++++++++++++++++++++++-------
 1 file changed, 78 insertions(+), 15 deletions(-)

diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index ef701fa..77be59a 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -23,14 +23,62 @@ static inline void local_flush_tlb_page_asid(unsigned long addr,
: "memory");
}
 
+static inline void local_flush_tlb_range(unsigned long start,
+ unsigned long size, unsigned long stride)
+{
+ if (size <= stride)
+ local_flush_tlb_page(start);
+ else
+ local_flush_tlb_all();
+}
+
+static inline void local_flush_tlb_range_asid(unsigned long start,
+ unsigned long size, unsigned long stride, unsigned long asid)
+{
+ if (size <= stride)
+ local_flush_tlb_page_asid(start, asid);
+ else
+ local_flush_tlb_all_asid(asid);
+}
+
+static void __ipi_flush_tlb_all(void *info)
+{
+ local_flush_tlb_all();
+}
+
void flush_tlb_all(void)
{
- sbi_remote_sfence_vma(NULL, 0, -1);
+ if (riscv_use_ipi_for_rfence())
+ on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
+ else
+ sbi_remote_sfence_vma(NULL, 0, -1);
+}
+
+struct flush_tlb_range_data {
+ unsigned long asid;
+ unsigned long start;
+ unsigned long size;
+ unsigned long stride;
+};
+
+static void __ipi_flush_tlb_range_asid(void *info)
+{
+ struct flush_tlb_range_data *d = info;
+
+ local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
+}
+
+static void __ipi_flush_tlb_range(void *info)
+{
+ struct flush_tlb_range_data *d = info;
+
+ local_flush_tlb_range(d->start, d->size, d->stride);
}
 
-static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
- unsigned long size, unsigned long stride)
+static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
+ unsigned long size, unsigned long stride)
{
+ struct flush_tlb_range_data ftd;
struct cpumask *cmask = mm_cpumask(mm);
unsigned int cpuid;
bool broadcast;
@@ -45,19 +93,34 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
 
if (broadcast) {
- sbi_remote_sfence_vma_asid(cmask, start, size, asid);
- } else if (size <= stride) {
- local_flush_tlb_page_asid(start, asid);
+ if (riscv_use_ipi_for_rfence()) {
+ ftd.asid = asid;
+ ftd.start = start;
+ ftd.size = size;
+ ftd.stride = stride;
+ on_each_cpu_mask(cmask,
+ __ipi_flush_tlb_range_asid,
+ &ftd, 1);
+ } else
+ sbi_remote_sfence_vma_asid(cmask,
+ start, size, asid);
} else {
- local_flush_tlb_all_asid(asid);
+ local_flush_tlb_range_asid(start, size, stride, asid);
}
} else {
if (broadcast) {
- sbi_remote_sfence_vma(cmask, start, size);
- } else if (size <= stride) {
- local_flush_tlb_page(start);
+ if (riscv_use_ipi_for_rfence()) {
+ ftd.asid = 0;
+ ftd.start = start;
+ ftd.size = size;
+ ftd.stride = stride;
+ on_each_cpu_mask(cmask,
+ __ipi_flush_tlb_range,
+ &ftd, 1);
+ } else
+ sbi_remote_sfence_vma(cmask, start, size);
} else {
- local_flush_tlb_all();
+ local_flush_tlb_range(start, size, stride);
}
}
 
@@ -66,23 +129,23 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
void flush_tlb_mm(struct mm_struct *mm)
{
- __sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
+ __flush_tlb_range(mm, 0, -1, PAGE_SIZE);
}
 
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
- __sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
+ __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}
 
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
- __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
+ __flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
}
 
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
- __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);
+ __flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif
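
One subtlety worth calling out: ftd in __flush_tlb_range() lives on the
caller's stack, and handing &ftd to other CPUs is only safe because
on_each_cpu_mask() is called with wait == 1, so it does not return until
every targeted CPU has finished the callback. A minimal sketch of that
contract (demo_data, demo_fn and demo_broadcast are made-up names for
illustration, not part of the patch):

	#include <linux/smp.h>

	struct demo_data {
		unsigned long addr;
	};

	/* Runs on each CPU in the mask, in IPI (interrupt) context. */
	static void demo_fn(void *info)
	{
		struct demo_data *d = info;
		/* ... act on d->addr, e.g. a local TLB flush ... */
	}

	static void demo_broadcast(const struct cpumask *mask, unsigned long addr)
	{
		struct demo_data d = { .addr = addr };

		/* wait == 1: blocks until all CPUs have run demo_fn, which
		 * is what makes passing stack memory through info safe. */
		on_each_cpu_mask(mask, demo_fn, &d, 1);
	}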