Message-Id: <20201026145114.59424-11-songmuchun@bytedance.com>
Date: Mon, 26 Oct 2020 22:51:05 +0800
From: Muchun Song <songmuchun@...edance.com>
To: corbet@....net, mike.kravetz@...cle.com, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, x86@...nel.org, hpa@...or.com,
dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
viro@...iv.linux.org.uk, akpm@...ux-foundation.org,
paulmck@...nel.org, mchehab+huawei@...nel.org,
pawan.kumar.gupta@...ux.intel.com, rdunlap@...radead.org,
oneukum@...e.com, anshuman.khandual@....com, jroedel@...e.de,
almasrymina@...gle.com, rientjes@...gle.com, willy@...radead.org
Cc: duanxiongchun@...edance.com, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org,
Muchun Song <songmuchun@...edance.com>
Subject: [PATCH v2 10/19] mm/hugetlb: Introduce remap_huge_page_pmd_vmemmap helper
The __free_huge_page_pmd_vmemmap and __remap_huge_page_pmd_vmemmap
functions are almost identical. So introduce the
remap_huge_page_pmd_vmemmap helper, which takes the per-PTE operation
as a remap_pte_fn callback, to deduplicate the code.
Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
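To make the shape of the refactoring easy to see outside of kernel
context, here is a minimal, self-contained C sketch of the pattern:
one walker parameterized by a function pointer replaces two duplicated
loops, just as remap_pte_fn does in the patch below. Every name in the
sketch (visit_fn, visit_free, visit_remap, walk) is hypothetical and
only illustrates the idea; the kernel names appear solely in comments.

#include <stdio.h>

/* Stand-in for remap_pte_fn: the per-item operation that varied
 * between the two otherwise identical walkers.
 */
typedef void (*visit_fn)(unsigned long addr);

static void visit_free(unsigned long addr)
{
	printf("free  %#lx\n", addr);
}

static void visit_remap(unsigned long addr)
{
	printf("remap %#lx\n", addr);
}

/* One walk() replaces two duplicated copies of the same loop,
 * the way remap_huge_page_pmd_vmemmap() replaces
 * __free_huge_page_pmd_vmemmap() and __remap_huge_page_pmd_vmemmap().
 */
static void walk(unsigned long start, unsigned long end,
		 unsigned long step, visit_fn fn)
{
	unsigned long addr;

	for (addr = start; addr != end; addr += step)
		fn(addr);
}

int main(void)
{
	walk(0x1000, 0x4000, 0x1000, visit_free);
	walk(0x1000, 0x4000, 0x1000, visit_remap);
	return 0;
}

Passing the per-PTE operation as a callback keeps the PMD iteration,
TLB flush, and reuse-page bookkeeping in one place, so the free and
remap paths can no longer drift apart.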
mm/hugetlb.c | 98 +++++++++++++++++++++-------------------------------
1 file changed, 39 insertions(+), 59 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cea580058a16..bd0c4e7fd994 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1482,6 +1482,41 @@ static inline int freed_vmemmap_hpage_dec(struct page *page)
return atomic_dec_return_relaxed(&page->_mapcount) + 1;
}
+typedef void (*remap_pte_fn)(struct page *reuse, pte_t *ptep,
+ unsigned long start, unsigned int nr_pages,
+ struct list_head *pages);
+
+static void remap_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
+ unsigned long addr,
+ struct list_head *pages,
+ remap_pte_fn remap_fn)
+{
+ unsigned long next;
+ unsigned long start = addr + RESERVE_VMEMMAP_NR * PAGE_SIZE;
+ unsigned long end = addr + nr_vmemmap_size(h);
+ struct page *reuse = NULL;
+
+ flush_cache_vunmap(start, end);
+
+ addr = start;
+ do {
+ unsigned int nr_pages;
+ pte_t *ptep;
+
+ ptep = pte_offset_kernel(pmd, addr);
+ if (!reuse) {
+ reuse = pte_page(ptep[-1]);
+ set_page_private(reuse, addr - PAGE_SIZE);
+ }
+
+ next = vmemmap_hpage_addr_end(addr, end);
+ nr_pages = (next - addr) >> PAGE_SHIFT;
+ remap_fn(reuse, ptep, addr, nr_pages, pages);
+ } while (pmd++, addr = next, addr != end);
+
+ flush_tlb_kernel_range(start, end);
+}
+
static inline void free_vmemmap_page_list(struct list_head *list)
{
struct page *page, *next;
@@ -1513,33 +1548,6 @@ static void __free_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
}
}
-static void __free_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
- unsigned long addr,
- struct list_head *free_pages)
-{
- unsigned long next;
- unsigned long start = addr + RESERVE_VMEMMAP_NR * PAGE_SIZE;
- unsigned long end = addr + nr_vmemmap_size(h);
- struct page *reuse = NULL;
-
- addr = start;
- do {
- unsigned int nr_pages;
- pte_t *ptep;
-
- ptep = pte_offset_kernel(pmd, addr);
- if (!reuse)
- reuse = pte_page(ptep[-1]);
-
- next = vmemmap_hpage_addr_end(addr, end);
- nr_pages = (next - addr) >> PAGE_SHIFT;
- __free_huge_page_pte_vmemmap(reuse, ptep, addr, nr_pages,
- free_pages);
- } while (pmd++, addr = next, addr != end);
-
- flush_tlb_kernel_range(start, end);
-}
-
static void split_vmemmap_pmd(pmd_t *pmd, pte_t *pte_p, unsigned long addr)
{
struct mm_struct *mm = &init_mm;
@@ -1598,7 +1606,8 @@ static void free_huge_page_vmemmap(struct hstate *h, struct page *head)
split_vmemmap_huge_page(head, pmd);
}
- __free_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head, &free_pages);
+ remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head, &free_pages,
+ __free_huge_page_pte_vmemmap);
freed_vmemmap_hpage_inc(pmd_page(*pmd));
spin_unlock(ptl);
@@ -1638,35 +1647,6 @@ static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
}
}
-static void __remap_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
- unsigned long addr,
- struct list_head *remap_pages)
-{
- unsigned long next;
- unsigned long start = addr + RESERVE_VMEMMAP_NR * PAGE_SIZE;
- unsigned long end = addr + nr_vmemmap_size(h);
- struct page *reuse = NULL;
-
- addr = start;
- do {
- unsigned int nr_pages;
- pte_t *ptep;
-
- ptep = pte_offset_kernel(pmd, addr);
- if (!reuse) {
- reuse = pte_page(ptep[-1]);
- set_page_private(reuse, addr - PAGE_SIZE);
- }
-
- next = vmemmap_hpage_addr_end(addr, end);
- nr_pages = (next - addr) >> PAGE_SHIFT;
- __remap_huge_page_pte_vmemmap(reuse, ptep, addr, nr_pages,
- remap_pages);
- } while (pmd++, addr = next, addr != end);
-
- flush_tlb_kernel_range(start, end);
-}
-
static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
{
int i;
@@ -1695,8 +1675,8 @@ static void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
ptl = vmemmap_pmd_lockptr(pmd);
spin_lock(ptl);
- __remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head,
- &remap_pages);
+ remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head, &remap_pages,
+ __remap_huge_page_pte_vmemmap);
if (!freed_vmemmap_hpage_dec(pmd_page(*pmd))) {
/*
* Todo:
--
2.20.1