Message-Id: <20220624173656.2033256-7-jthoughton@google.com>
Date: Fri, 24 Jun 2022 17:36:36 +0000
From: James Houghton <jthoughton@...gle.com>
To: Mike Kravetz <mike.kravetz@...cle.com>,
Muchun Song <songmuchun@...edance.com>,
Peter Xu <peterx@...hat.com>
Cc: David Hildenbrand <david@...hat.com>,
David Rientjes <rientjes@...gle.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Mina Almasry <almasrymina@...gle.com>,
Jue Wang <juew@...gle.com>,
Manish Mishra <manish.mishra@...anix.com>,
"Dr . David Alan Gilbert" <dgilbert@...hat.com>,
linux-mm@...ck.org, linux-kernel@...r.kernel.org,
James Houghton <jthoughton@...gle.com>
Subject: [RFC PATCH 06/26] mm: make free_p?d_range functions public
This makes them usable for HugeTLB page table freeing operations.
With HugeTLB high-granularity mapping, the page tables backing a HugeTLB
VMA can become more complex, and these functions already handle freeing
page tables generally, so HugeTLB can reuse them for teardown.
Signed-off-by: James Houghton <jthoughton@...gle.com>
---
include/linux/mm.h | 7 +++++++
mm/memory.c | 8 ++++----
2 files changed, 11 insertions(+), 4 deletions(-)
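Illustrative note (not part of this patch): with free_pmd_range() made
public, a hypothetical HugeTLB high-granularity teardown helper could
free the lower-level tables under a PUD-sized region roughly as sketched
below. hugetlb_free_pud_mapping() is an invented name for illustration;
only free_pmd_range() and its semantics come from this series.

/*
 * Hypothetical sketch: free the PMD/PTE tables mapped below @pud for
 * the range [addr, end), constrained by [floor, ceiling). The caller is
 * assumed to already hold an mmu_gather and the appropriate locks.
 */
static void hugetlb_free_pud_mapping(struct mmu_gather *tlb, pud_t *pud,
				     unsigned long addr, unsigned long end,
				     unsigned long floor, unsigned long ceiling)
{
	if (pud_none(*pud))
		return;

	/*
	 * free_pmd_range() walks the PMD entries under @pud, frees the
	 * PTE pages via free_pte_range(), and, when the floor/ceiling
	 * bounds allow, clears the PUD entry and frees the PMD page
	 * itself.
	 */
	free_pmd_range(tlb, pud, addr, end, floor, ceiling);
}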
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bc8f326be0ce..07f5da512147 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1847,6 +1847,13 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
struct mmu_notifier_range;
+void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr);
+void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, unsigned long addr,
+ unsigned long end, unsigned long floor, unsigned long ceiling);
+void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, unsigned long addr,
+ unsigned long end, unsigned long floor, unsigned long ceiling);
+void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long addr,
+ unsigned long end, unsigned long floor, unsigned long ceiling);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int
diff --git a/mm/memory.c b/mm/memory.c
index 7a089145cad4..bb3b9b5b94fb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -227,7 +227,7 @@ static void check_sync_rss_stat(struct task_struct *task)
* Note: this doesn't free the actual pages themselves. That
* has been handled earlier when unmapping all the memory regions.
*/
-static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long addr)
{
pgtable_t token = pmd_pgtable(*pmd);
@@ -236,7 +236,7 @@ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
mm_dec_nr_ptes(tlb->mm);
}
-static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
@@ -270,7 +270,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
mm_dec_nr_pmds(tlb->mm);
}
-static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
+inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
@@ -304,7 +304,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
mm_dec_nr_puds(tlb->mm);
}
-static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
+inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
--
2.37.0.rc0.161.g10f37bed90-goog