Message-Id: <d35f42f7b598f629437940f941826e2cc49a97f6.1654271618.git.baolin.wang@linux.alibaba.com>
Date: Sat, 4 Jun 2022 09:32:31 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: akpm@...ux-foundation.org
Cc: baolin.wang@...ux.alibaba.com, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH 3/3] mm: Add kernel PTE level pagetable pages account
Now the kernel PTE-level page tables are always protected by mm->page_table_lock
instead of a split pagetable lock, so the kernel PTE-level pagetable pages
are not accounted. To get accurate pagetable accounting, call the new
helpers pgtable_set_and_inc()/pgtable_clear_and_dec() when allocating or
freeing a kernel PTE-level pagetable page.

Meanwhile, convert the affected architectures to use the corresponding generic
PTE pagetable allocation and freeing functions.
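
For reference, a minimal sketch of what the pgtable_set_and_inc()/
pgtable_clear_and_dec() helpers introduced earlier in this series could
look like; the exact bodies are an assumption here, modeled on
pgtable_pte_page_ctor()/pgtable_pte_page_dtor() without the split
ptlock handling:

    /* Assumed helper bodies -- see the earlier patches in this series. */
    static inline void pgtable_set_and_inc(struct page *page)
    {
            /* Mark the page as a pagetable page and bump NR_PAGETABLE. */
            __SetPageTable(page);
            inc_lruvec_page_state(page, NR_PAGETABLE);
    }

    static inline void pgtable_clear_and_dec(struct page *page)
    {
            /* Undo the marking and drop the NR_PAGETABLE count. */
            __ClearPageTable(page);
            dec_lruvec_page_state(page, NR_PAGETABLE);
    }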
Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
arch/csky/include/asm/pgalloc.h | 2 +-
arch/microblaze/mm/pgtable.c | 2 +-
arch/openrisc/mm/ioremap.c | 2 +-
include/asm-generic/pgalloc.h | 10 +++++++++-
4 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index bbbd069..2443226 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -29,7 +29,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
pte_t *pte;
unsigned long i;
- pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ pte = __pte_alloc_one_kernel(mm);
if (!pte)
return NULL;
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 9f73265..e96dd1b 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -245,7 +245,7 @@ unsigned long iopa(unsigned long addr)
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
if (mem_init_done)
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ return __pte_alloc_one_kernel(mm);
else
return memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
MEMBLOCK_LOW_LIMIT,
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index daae13a..3453acc 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -118,7 +118,7 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
pte_t *pte;
if (likely(mem_init_done)) {
- pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+ pte = __pte_alloc_one_kernel(mm);
} else {
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pte)
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 78ab9f6..f5345b2 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -18,7 +18,14 @@
*/
static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
{
- return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
+ struct page *page;
+ gfp_t gfp = GFP_PGTABLE_KERNEL;
+
+ page = alloc_pages(gfp & ~__GFP_HIGHMEM, 0);
+ if (!page)
+ return NULL;
+ pgtable_set_and_inc(page);
+ return (pte_t *)page_address(page);
}
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
@@ -41,6 +48,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
*/
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
+ pgtable_clear_and_dec(virt_to_page(pte));
free_page((unsigned long)pte);
}
--
1.8.3.1