Message-Id: <1560930553-26502-5-git-send-email-guoren@kernel.org>
Date: Wed, 19 Jun 2019 15:49:13 +0800
From: guoren@...nel.org
To: julien.grall@....com, arnd@...db.de, linux-kernel@...r.kernel.org
Cc: linux-csky@...r.kernel.org, Guo Ren <ren_guo@...ky.com>
Subject: [PATCH 4/4] csky: Improve tlb operation with help of asid
From: Guo Ren <ren_guo@...ky.com>
There are two generations of TLB operation instructions for C-SKY.
The first generation uses the mcr register and needs software to do
more of the work; the second generation uses dedicated instructions,
e.g.:
tlbi.va, tlbi.vas, tlbi.alls
We implemented the following functions:
- flush_tlb_range (a range of entries)
- flush_tlb_page (one entry)
The functions above use the asid from vma->vm_mm to invalidate TLB
entries, and we can use the tlbi.vas instruction on the newest
generation of C-SKY CPUs.
- flush_tlb_kernel_range
- flush_tlb_one
The functions above don't care about the asid; they invalidate TLB
entries by VPN only, and we can use the tlbi.vaas instruction on the
newest generation of C-SKY CPUs.
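For illustration only (not part of this patch), the two operand
encodings used below reduce to roughly:

	/* asid-tagged: invalidate this VA in one address space only */
	asm volatile("tlbi.vas  %0" :: "r"((addr & TLB_ENTRY_SIZE_MASK) | cpu_asid(mm)));

	/* asid-agnostic: invalidate this VA in every address space */
	asm volatile("tlbi.vaas %0" :: "r"(addr & TLB_ENTRY_SIZE_MASK));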
Signed-off-by: Guo Ren <ren_guo@...ky.com>
Cc: Julien Grall <julien.grall@....com>
Cc: Arnd Bergmann <arnd@...db.de>
---
arch/csky/mm/tlb.c | 136 +++++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 132 insertions(+), 4 deletions(-)
diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c
index efae81c..eb3ba6c 100644
--- a/arch/csky/mm/tlb.c
+++ b/arch/csky/mm/tlb.c
@@ -10,6 +10,13 @@
#include <asm/pgtable.h>
#include <asm/setup.h>
+/*
+ * One C-SKY MMU TLB entry contains two PFN/page entries, i.e.:
+ * 1 VPN -> 2 PFNs
+ */
+#define TLB_ENTRY_SIZE (PAGE_SIZE * 2)
+#define TLB_ENTRY_SIZE_MASK (PAGE_MASK << 1)
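+/* e.g. with 4 KiB pages: entry size 8 KiB, mask ~0x1fffUL */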
+
void flush_tlb_all(void)
{
tlb_invalid_all();
@@ -17,27 +24,148 @@ void flush_tlb_all(void)
void flush_tlb_mm(struct mm_struct *mm)
{
+#ifdef CONFIG_CPU_HAS_TLBI
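+	/* Drop every TLB entry tagged with this mm's asid. */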
+ asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
+#else
tlb_invalid_all();
+#endif
}
+/*
+ * The MMU operation regs can only invalidate TLB entries in the jtlb,
+ * so we need to change the asid field of entryhi to invalidate the
+ * I-utlb & D-utlb.
+ */
+#ifndef CONFIG_CPU_HAS_TLBI
+#define restore_asid_inv_utlb(oldpid, newpid) \
+do { \
+ if (oldpid == newpid) \
+ write_mmu_entryhi(oldpid + 1); \
+ write_mmu_entryhi(oldpid); \
+} while (0)
+#endif
+
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
- tlb_invalid_all();
+ unsigned long newpid = cpu_asid(vma->vm_mm);
+
+ start &= TLB_ENTRY_SIZE_MASK;
+ end += TLB_ENTRY_SIZE - 1;
+ end &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
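+	/* Invalidate one page pair per tlbi.vas, tagged with this mm's asid. */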
+ while (start < end) {
+ asm volatile("tlbi.vas %0"::"r"(start | newpid));
+ start += TLB_ENTRY_SIZE;
+ }
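+	/* Ensure the tlbi operations above have completed. */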
+ sync_is();
+#else
+ {
+ unsigned long flags, oldpid;
+
+ local_irq_save(flags);
+ oldpid = read_mmu_entryhi() & ASID_MASK;
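+		/* Probe each page pair under newpid and invalidate any jtlb hit. */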
+ while (start < end) {
+ int idx;
+
+ write_mmu_entryhi(start | newpid);
+ start += TLB_ENTRY_SIZE;
+ tlb_probe();
+ idx = read_mmu_index();
+ if (idx >= 0)
+ tlb_invalid_indexed();
+ }
+ restore_asid_inv_utlb(oldpid, newpid);
+ local_irq_restore(flags);
+ }
+#endif
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- tlb_invalid_all();
+ start &= TLB_ENTRY_SIZE_MASK;
+ end += TLB_ENTRY_SIZE - 1;
+ end &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
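+	/* Kernel range: invalidate by VA in every address space. */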
+ while (start < end) {
+ asm volatile("tlbi.vaas %0"::"r"(start));
+ start += TLB_ENTRY_SIZE;
+ }
+ sync_is();
+#else
+ {
+ unsigned long flags, oldpid;
+
+ local_irq_save(flags);
+ oldpid = read_mmu_entryhi() & ASID_MASK;
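+		/* Probe each page pair under the current asid and drop any hit. */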
+ while (start < end) {
+ int idx;
+
+ write_mmu_entryhi(start | oldpid);
+ start += TLB_ENTRY_SIZE;
+ tlb_probe();
+ idx = read_mmu_index();
+ if (idx >= 0)
+ tlb_invalid_indexed();
+ }
+ restore_asid_inv_utlb(oldpid, oldpid);
+ local_irq_restore(flags);
+ }
+#endif
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
- tlb_invalid_all();
+ int newpid = cpu_asid(vma->vm_mm);
+
+ addr &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
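+	/* One page pair, tagged with the asid of vma->vm_mm. */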
+ asm volatile("tlbi.vas %0"::"r"(addr | newpid));
+ sync_is();
+#else
+ {
+ int oldpid, idx;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ oldpid = read_mmu_entryhi() & ASID_MASK;
+ write_mmu_entryhi(addr | newpid);
+ tlb_probe();
+ idx = read_mmu_index();
+ if (idx >= 0)
+ tlb_invalid_indexed();
+
+ restore_asid_inv_utlb(oldpid, newpid);
+ local_irq_restore(flags);
+ }
+#endif
}
void flush_tlb_one(unsigned long addr)
{
- tlb_invalid_all();
+ addr &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
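+	/* Invalidate this VA in every address space. */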
+ asm volatile("tlbi.vaas %0"::"r"(addr));
+ sync_is();
+#else
+ {
+ int oldpid, idx;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ oldpid = read_mmu_entryhi() & ASID_MASK;
+ write_mmu_entryhi(addr | oldpid);
+ tlb_probe();
+ idx = read_mmu_index();
+ if (idx >= 0)
+ tlb_invalid_indexed();
+
+ restore_asid_inv_utlb(oldpid, oldpid);
+ local_irq_restore(flags);
+ }
+#endif
}
EXPORT_SYMBOL(flush_tlb_one);
--
2.7.4