Message-Id: <20201116103058.5461-3-jcmvbkbc@gmail.com>
Date: Mon, 16 Nov 2020 02:30:58 -0800
From: Max Filippov <jcmvbkbc@...il.com>
To: linux-xtensa@...ux-xtensa.org
Cc: Chris Zankel <chris@...kel.net>, linux-kernel@...r.kernel.org,
Max Filippov <jcmvbkbc@...il.com>, stable@...r.kernel.org
Subject: [PATCH 2/2] xtensa: disable preemption around cache alias management calls

Although cache alias management calls set up and tear down TLB entries,
and fast_second_level_miss is able to restore a TLB entry should it be
evicted, they absolutely cannot preempt each other because they use
the same TLBTEMP area for different purposes.

Disable preemption around all cache alias management calls to enforce
that.
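
E.g., a minimal sketch of the pattern being enforced, reusing the calls
from the diff below (illustrative only, not literal kernel code):

	unsigned long virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);

	preempt_disable();	/* pin this CPU's TLBTEMP window */
	__flush_invalidate_dcache_page_alias(virt, phys);
	preempt_enable();	/* the window may now be reused */

Without the preempt_disable()/preempt_enable() pair a preempting task
could set up its own mapping at the same TLBTEMP address, and the flush
would then operate on the wrong page.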
Cc: stable@...r.kernel.org
Signed-off-by: Max Filippov <jcmvbkbc@...il.com>
---
 arch/xtensa/mm/cache.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 5835406b3cec..085b8c77b9d9 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -70,8 +70,10 @@ static inline void kmap_invalidate_coherent(struct page *page,
 			kvaddr = TLBTEMP_BASE_1 +
 				(page_to_phys(page) & DCACHE_ALIAS_MASK);
 
+			preempt_disable();
 			__invalidate_dcache_page_alias(kvaddr,
 						       page_to_phys(page));
+			preempt_enable();
 		}
 	}
 }
@@ -156,6 +158,7 @@ void flush_dcache_page(struct page *page)
 		if (!alias && !mapping)
 			return;
 
+		preempt_disable();
 		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
 		__flush_invalidate_dcache_page_alias(virt, phys);
 
@@ -166,6 +169,7 @@ void flush_dcache_page(struct page *page)
 
 		if (mapping)
 			__invalidate_icache_page_alias(virt, phys);
+		preempt_enable();
 	}
 
 	/* There shouldn't be an entry in the cache for this page anymore. */
@@ -199,8 +203,10 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 	unsigned long phys = page_to_phys(pfn_to_page(pfn));
 	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
 
+	preempt_disable();
 	__flush_invalidate_dcache_page_alias(virt, phys);
 	__invalidate_icache_page_alias(virt, phys);
+	preempt_enable();
 }
 EXPORT_SYMBOL(local_flush_cache_page);
 
@@ -227,11 +233,13 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 		unsigned long phys = page_to_phys(page);
 		unsigned long tmp;
 
+		preempt_disable();
 		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
 		__flush_invalidate_dcache_page_alias(tmp, phys);
 		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
 		__flush_invalidate_dcache_page_alias(tmp, phys);
 		__invalidate_icache_page_alias(tmp, phys);
+		preempt_enable();
 
 		clear_bit(PG_arch_1, &page->flags);
 	}
@@ -265,7 +273,9 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 
 	if (alias) {
 		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		preempt_disable();
 		__flush_invalidate_dcache_page_alias(t, phys);
+		preempt_enable();
 	}
 
 	/* Copy data */
@@ -280,9 +290,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 	if (alias) {
 		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
 
+		preempt_disable();
 		__flush_invalidate_dcache_range((unsigned long) dst, len);
 		if ((vma->vm_flags & VM_EXEC) != 0)
 			__invalidate_icache_page_alias(t, phys);
+		preempt_enable();
 
 	} else if ((vma->vm_flags & VM_EXEC) != 0) {
 		__flush_dcache_range((unsigned long)dst,len);
@@ -304,7 +316,9 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 
 	if (alias) {
 		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		preempt_disable();
 		__flush_invalidate_dcache_page_alias(t, phys);
+		preempt_enable();
 	}
 
 	memcpy(dst, src, len);
--
2.20.1