[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20191211122956.227316370@infradead.org>
Date: Wed, 11 Dec 2019 13:07:23 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: Will Deacon <will@...nel.org>,
"Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Nick Piggin <npiggin@...il.com>,
Peter Zijlstra <peterz@...radead.org>
Cc: linux-arch@...r.kernel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Yoshinori Sato <ysato@...rs.sourceforge.jp>,
Rich Felker <dalias@...c.org>,
"David S. Miller" <davem@...emloft.net>,
Helge Deller <deller@....de>,
Geert Uytterhoeven <geert@...ux-m68k.org>,
Paul Burton <paulburton@...nel.org>,
Tony Luck <tony.luck@...el.com>,
Richard Henderson <rth@...ddle.net>,
Nick Hu <nickhu@...estech.com>,
Paul Walmsley <paul.walmsley@...ive.com>
Subject: [PATCH 10/17] sparc32/tlb: Fix __p*_free_tlb()
Just like regular pages, page directories need to observe the
following order:
1) unhook
2) TLB invalidate
3) free
to ensure it is safe against concurrent accesses.
Because Sparc32 has non-page based page directories, use a custom
table freer.
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
arch/sparc/Kconfig | 1 +
arch/sparc/include/asm/pgalloc_32.h | 7 +++++--
arch/sparc/mm/srmmu.c | 28 ++++++++++++++++++++++++++++
3 files changed, 34 insertions(+), 2 deletions(-)
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -57,6 +57,7 @@ config SPARC32
select CLZ_TAB
select HAVE_UID16
select OLD_SIGACTION
+ select MMU_GATHER_TABLE_FREE
config SPARC64
def_bool 64BIT
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -12,6 +12,9 @@
struct page;
+extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int idx);
+extern void __tlb_remove_table(void *table);
+
void *srmmu_get_nocache(int size, int align);
void srmmu_free_nocache(void *addr, int size);
@@ -48,7 +51,7 @@ static inline void free_pmd_fast(pmd_t *
}
#define pmd_free(mm, pmd) free_pmd_fast(pmd)
-#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
+#define __pmd_free_tlb(tlb, pmd, addr) pgtable_free_tlb((tlb), (pmd), 1)
void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
#define pmd_pgtable(pmd) pmd_page(pmd)
@@ -72,6 +75,6 @@ static inline void free_pte_fast(pte_t *
#define pte_free_kernel(mm, pte) free_pte_fast(pte)
void pte_free(struct mm_struct * mm, pgtable_t pte);
-#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
+#define __pte_free_tlb(tlb, pte, addr) pgtable_free_tlb((tlb), (pte), 0)
#endif /* _SPARC_PGALLOC_H */
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -38,6 +38,7 @@
#include <asm/page.h>
#include <asm/asi.h>
#include <asm/smp.h>
+#include <asm/tlb.h>
#include <asm/io.h>
/* Now the cpu specific definitions. */
@@ -1849,3 +1850,30 @@ void __init load_mmu(void)
sun4m_init_smp();
#endif
}
+
+#define TLB_IDX_MASK 1UL
+
+void __tlb_remove_table(void *table)
+{
+ void *dir = (void *)((unsigned long)table & ~TLB_IDX_MASK);
+ int idx = (unsigned long)table & TLB_IDX_MASK;
+
+ switch (idx) {
+ case 1: /* PMD */
+ pmd_free(NULL, dir);
+ break;
+ case 0: /* PTE */
+ pte_free(NULL, dir);
+ break;
+ }
+}
+
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int idx)
+{
+ unsigned long pgf = (unsigned long)table;
+ BUG_ON(idx > TLB_IDX_MASK);
+ BUG_ON(pgf & TLB_IDX_MASK);
+ pgf |= idx;
+ tlb_remove_table(tlb, (void *)pgf);
+}
+
Powered by blists - more mailing lists