Message-Id: <20190624054311.30256-13-hch@lst.de>
Date: Mon, 24 Jun 2019 07:43:06 +0200
From: Christoph Hellwig <hch@....de>
To: Palmer Dabbelt <palmer@...ive.com>,
Paul Walmsley <paul.walmsley@...ive.com>
Cc: Damien Le Moal <damien.lemoal@....com>,
linux-riscv@...ts.infradead.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 12/17] riscv: implement remote sfence.i natively for M-mode

The RISC-V ISA only supports flushing the instruction cache for the local
CPU core. For normal S-mode Linux remote flushing is offloaded to
machine mode using ecalls, but for M-mode Linux we'll have to do it
ourselves. Use the same implementation as all the existing open source
SBI implementations by sending an IPI to all remote cores so that the
sfence.i instruction gets executed on every live core.
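
For reference, the per-hart primitive that the IPI handler calls is just a
bare fence.i; a minimal sketch of local_flush_icache_all (an assumption
about its definition in arch/riscv/include/asm/cacheflush.h, not part of
this patch) looks like:

	/* Synchronize this hart's instruction fetches with prior stores. */
	static inline void local_flush_icache_all(void)
	{
		asm volatile ("fence.i" ::: "memory");
	}
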
Signed-off-by: Christoph Hellwig <hch@....de>
---
 arch/riscv/mm/cacheflush.c | 31 +++++++++++++++++++++++++++----
 1 file changed, 27 insertions(+), 4 deletions(-)

diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 9ebcff8ba263..10875ea1065e 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -10,10 +10,35 @@
 
 #include <asm/sbi.h>
 
+#ifdef CONFIG_M_MODE
+static void ipi_remote_fence_i(void *info)
+{
+	return local_flush_icache_all();
+}
+
+void flush_icache_all(void)
+{
+	on_each_cpu(ipi_remote_fence_i, NULL, 1);
+}
+
+static void flush_icache_cpumask(const cpumask_t *mask)
+{
+	on_each_cpu_mask(mask, ipi_remote_fence_i, NULL, 1);
+}
+#else /* CONFIG_M_MODE */
 void flush_icache_all(void)
 {
 	sbi_remote_fence_i(NULL);
 }
+static void flush_icache_cpumask(const cpumask_t *mask)
+{
+	cpumask_t hmask;
+
+	cpumask_clear(&hmask);
+	riscv_cpuid_to_hartid_mask(mask, &hmask);
+	sbi_remote_fence_i(hmask.bits);
+}
+#endif /* CONFIG_M_MODE */
 
 /*
  * Performs an icache flush for the given MM context. RISC-V has no direct
@@ -28,7 +53,7 @@ void flush_icache_all(void)
 void flush_icache_mm(struct mm_struct *mm, bool local)
 {
 	unsigned int cpu;
-	cpumask_t others, hmask, *mask;
+	cpumask_t others, *mask;
 
 	preempt_disable();
 
@@ -47,9 +72,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
 	local |= cpumask_empty(&others);
 	if (mm != current->active_mm || !local) {
-		cpumask_clear(&hmask);
-		riscv_cpuid_to_hartid_mask(&others, &hmask);
-		sbi_remote_fence_i(hmask.bits);
+		flush_icache_cpumask(&others);
 	} else {
 		/*
 		 * It's assumed that at least one strongly ordered operation is
--
2.20.1