Message-Id: <aad6ee0c99b5f38fe680c160e20e11193b963787.1527608397.git.christophe.leroy@c-s.fr>
Date: Tue, 29 May 2018 15:50:40 +0000 (UTC)
From: Christophe Leroy <christophe.leroy@....fr>
To: Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Michael Ellerman <mpe@...erman.id.au>,
aneesh.kumar@...ux.vnet.ibm.com
Cc: linux-kernel@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org
Subject: [PATCH v3 13/14] powerpc/mm: Use pte_fragment_alloc() on 8xx
In 16k page size mode, the 8xx needs only 4k for a page table.
This patch makes use of the pte_fragment functions in order
to avoid wasting memory.
Signed-off-by: Christophe Leroy <christophe.leroy@....fr>
---
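Note for reviewers (not part of the commit message): below is a minimal
userspace sketch of the fragment scheme this patch enables, assuming 16k
pages and 4k page tables as in this series. The names frag_alloc(),
frag_free(), MAX_PAGES and the pages[] table are purely illustrative;
the real pte_fragment_alloc()/pte_fragment_free() in
arch/powerpc/mm/pgtable.c and pgtable_32.c use the struct page refcount,
mm->context.pte_frag and mm->page_table_lock instead.

/*
 * Illustrative model only: one 16k page carved into PTE_FRAG_NR = 4
 * fragments of PTE_FRAG_SIZE = 4k, handed out through a cursor and
 * released through a per-page refcount.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SHIFT		14			/* 16k pages */
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PTE_FRAG_SIZE_SHIFT	12			/* 4k page tables */
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)
#define PTE_FRAG_NR		(PAGE_SIZE / PTE_FRAG_SIZE)	/* == 4 */

#define MAX_PAGES 16

/* Models the struct page refcount that pte_fragment_free() drops. */
static struct { void *base; int refs; } pages[MAX_PAGES];

/* Models mm->context.pte_frag: the next unused fragment, if any. */
static void *pte_frag_cursor;

static void *frag_alloc(void)
{
	void *ret = pte_frag_cursor;
	unsigned long off;
	int i;

	if (!ret) {			/* models __alloc_for_ptecache() */
		ret = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!ret)
			return NULL;
		memset(ret, 0, PAGE_SIZE);
		for (i = 0; i < MAX_PAGES; i++) {
			if (!pages[i].base) {
				pages[i].base = ret;
				pages[i].refs = PTE_FRAG_NR; /* one ref per fragment */
				break;
			}
		}
	}

	/* Advance the cursor; NULL once the last fragment is handed out. */
	off = (unsigned long)ret & (PAGE_SIZE - 1);
	pte_frag_cursor = (off + PTE_FRAG_SIZE < PAGE_SIZE) ?
			  (void *)((char *)ret + PTE_FRAG_SIZE) : NULL;
	return ret;
}

static void frag_free(void *frag)	/* models pte_fragment_free() */
{
	void *base = (void *)((unsigned long)frag & ~(PAGE_SIZE - 1));
	int i;

	for (i = 0; i < MAX_PAGES; i++) {
		if (pages[i].base != base)
			continue;
		if (--pages[i].refs == 0) {
			free(base);	/* models free_unref_page() */
			pages[i].base = NULL;
		}
		break;
	}
}

int main(void)
{
	void *frag[PTE_FRAG_NR];
	int i;

	for (i = 0; i < PTE_FRAG_NR; i++)
		frag[i] = frag_alloc();

	/* All four 4k tables share a single 16k page. */
	printf("%lu frags of %lu bytes, first=%p last=%p\n",
	       PTE_FRAG_NR, PTE_FRAG_SIZE, frag[0], frag[PTE_FRAG_NR - 1]);

	for (i = 0; i < PTE_FRAG_NR; i++)
		frag_free(frag[i]);	/* page freed on the last put */
	return 0;
}

Running it shows all four fragments landing in the same 16k page, with
the backing page released only on the last free, mirroring the
put_page_testzero() path in pte_fragment_free(). In the real code,
references pre-charged for fragments that were never handed out are
dropped by destroy_pagetable_page(), called from destroy_context() as
added in mmu_context_nohash.c below.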
arch/powerpc/include/asm/mmu-8xx.h | 4 +++
arch/powerpc/include/asm/nohash/32/pgalloc.h | 44 +++++++++++++++++++++++++++-
arch/powerpc/include/asm/nohash/32/pgtable.h | 10 ++++++-
arch/powerpc/mm/mmu_context_nohash.c | 4 +++
arch/powerpc/mm/pgtable.c | 10 ++++++-
arch/powerpc/mm/pgtable_32.c | 12 ++++++++
arch/powerpc/platforms/Kconfig.cputype | 1 +
7 files changed, 82 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 193f53116c7a..4f4cb754afd8 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -190,6 +190,10 @@ typedef struct {
struct slice_mask mask_8m;
# endif
#endif
+#ifdef CONFIG_NEED_PTE_FRAG
+ /* for 4K PTE fragment support */
+ void *pte_frag;
+#endif
} mm_context_t;
#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 81c19d6460bd..d65b5f29008c 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -69,11 +69,22 @@ static inline void pmd_populate_kernel_g(struct mm_struct *mm, pmd_t *pmdp,
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pte_page)
{
+#ifdef CONFIG_NEED_PTE_FRAG
+ *pmdp = __pmd(__pa(pte_page) | _PMD_USER | _PMD_PRESENT);
+#else
*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_USER |
_PMD_PRESENT);
+#endif
}
+#ifdef CONFIG_NEED_PTE_FRAG
+static inline pgtable_t pmd_pgtable(pmd_t pmd)
+{
+ return (pgtable_t)pmd_page_vaddr(pmd);
+}
+#else
#define pmd_pgtable(pmd) pmd_page(pmd)
+#endif
#else
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
@@ -95,6 +106,32 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel_g(pmd, address))? \
NULL: pte_offset_kernel(pmd, address))
+#ifdef CONFIG_NEED_PTE_FRAG
+extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
+extern void pte_fragment_free(unsigned long *, int);
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ return (pte_t *)pte_fragment_alloc(mm, address, 1);
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ return (pgtable_t)pte_fragment_alloc(mm, address, 0);
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ pte_fragment_free((unsigned long *)pte, 1);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ pte_fragment_free((unsigned long *)ptepage, 0);
+}
+#else
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
@@ -108,11 +145,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
pgtable_page_dtor(ptepage);
__free_page(ptepage);
}
+#endif
static inline void pgtable_free(void *table, unsigned index_size)
{
if (!index_size) {
- free_page((unsigned long)table);
+ pte_free_kernel(NULL, table);
} else {
BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
kmem_cache_free(PGT_CACHE(index_size), table);
@@ -150,7 +188,11 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
tlb_flush_pgtable(tlb, address);
+#ifdef CONFIG_NEED_PTE_FRAG
+ pgtable_free_tlb(tlb, table, 0);
+#else
pgtable_page_dtor(table);
pgtable_free_tlb(tlb, page_address(table), 0);
+#endif
}
#endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 5872d79360a9..ab5671e14fa1 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -20,6 +20,9 @@ extern int icache_44x_need_flush;
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
#define PTE_INDEX_SIZE (PTE_SHIFT - 2)
+#define PTE_FRAG_NR 4
+#define PTE_FRAG_SIZE_SHIFT 12
+#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
#else
#define PTE_INDEX_SIZE PTE_SHIFT
#endif
@@ -319,7 +322,7 @@ static inline int pte_young(pte_t pte)
*/
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd) \
- ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+ ((unsigned long) __va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
@@ -342,9 +345,14 @@ static inline int pte_young(pte_t pte)
#define pte_offset_kernel(dir, addr) \
(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
pte_index(addr))
+#ifdef CONFIG_NEED_PTE_FRAG
+#define pte_offset_map(dir, addr) pte_offset_kernel(dir, addr)
+#define pte_unmap(pte) do {} while (0)
+#else
#define pte_offset_map(dir, addr) \
((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte) kunmap_atomic(pte)
+#endif
/*
* Encode and decode a swap entry.
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index be8f5c9d4d08..2b048e3addff 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -344,6 +344,9 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
#endif
mm->context.id = MMU_NO_CONTEXT;
mm->context.active = 0;
+#ifdef CONFIG_NEED_PTE_FRAG
+ mm->context.pte_frag = NULL;
+#endif
return 0;
}
@@ -372,6 +375,7 @@ void destroy_context(struct mm_struct *mm)
nr_free_contexts++;
}
raw_spin_unlock_irqrestore(&context_lock, flags);
+ destroy_pagetable_page(mm);
}
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 3b3e51e9e431..18a5cbf1d1a0 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
@@ -327,10 +328,17 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
return (pte_t *)ret;
}
-pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
+__ref pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
pte_t *pte;
+ if (kernel && !slab_is_available()) {
+ pte = __va(memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE));
+ if (pte)
+ memset(pte, 0, PTE_FRAG_SIZE);
+
+ return pte;
+ }
pte = get_pte_from_cache(mm);
if (pte)
return pte;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 3aa0c78db95d..5c8737cf2945 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -40,6 +40,17 @@
extern char etext[], _stext[], _sinittext[], _einittext[];
+#ifdef CONFIG_NEED_PTE_FRAG
+void pte_fragment_free(unsigned long *table, int kernel)
+{
+ struct page *page = virt_to_page(table);
+ if (put_page_testzero(page)) {
+ if (!kernel)
+ pgtable_page_dtor(page);
+ free_unref_page(page);
+ }
+}
+#else
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
@@ -69,6 +80,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
}
return ptepage;
}
+#endif
#ifdef CONFIG_PPC_GUARDED_PAGE_IN_PMD
int __pte_alloc_kernel_g(pmd_t *pmd, unsigned long address)
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index ddccae5ed192..482609732bc6 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -347,6 +347,7 @@ config NEED_PAGE_FRAG
config NEED_PTE_FRAG
bool
default y if PPC_BOOK3S_64
+ default y if PPC_8xx && PPC_16K_PAGES
default n
select NEED_PAGE_FRAG
--
2.13.3