Message-Id: <20201113105952.11638-12-songmuchun@bytedance.com>
Date: Fri, 13 Nov 2020 18:59:42 +0800
From: Muchun Song <songmuchun@...edance.com>
To: corbet@....net, mike.kravetz@...cle.com, tglx@...utronix.de,
	mingo@...hat.com, bp@...en8.de, x86@...nel.org, hpa@...or.com,
	dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
	viro@...iv.linux.org.uk, akpm@...ux-foundation.org,
	paulmck@...nel.org, mchehab+huawei@...nel.org,
	pawan.kumar.gupta@...ux.intel.com, rdunlap@...radead.org,
	oneukum@...e.com, anshuman.khandual@....com, jroedel@...e.de,
	almasrymina@...gle.com, rientjes@...gle.com, willy@...radead.org,
	osalvador@...e.de, mhocko@...e.com
Cc: duanxiongchun@...edance.com, linux-doc@...r.kernel.org,
	linux-kernel@...r.kernel.org, linux-mm@...ck.org,
	linux-fsdevel@...r.kernel.org,
	Muchun Song <songmuchun@...edance.com>
Subject: [PATCH v4 11/21] mm/hugetlb: Allocate the vmemmap pages associated with each hugetlb page

When we free a hugetlb page back to the buddy allocator, we should
allocate the vmemmap pages associated with it again, since part of the
vmemmap was freed when the page became a hugetlb page. We can do that
in __free_hugepage().
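
For scale, a back-of-the-envelope check of how many pages that is (a
standalone userspace sketch, not kernel code; it only restates the
arithmetic this series is built on, assuming x86-64 with 4KB base pages
and a 64-byte struct page):

	#include <stdio.h>

	int main(void)
	{
		unsigned long hpage_size = 2UL << 20;	/* 2MB hugetlb page */
		unsigned long page_size = 4096UL;	/* base page size */
		unsigned long struct_page_size = 64UL;	/* sizeof(struct page) */
		unsigned long reserved = 2UL;		/* RESERVE_VMEMMAP_NR */

		/* 512 struct pages -> 32KB of vmemmap -> 8 base pages. */
		unsigned long vmemmap_pages =
			hpage_size / page_size * struct_page_size / page_size;

		/* Prints 8 and 6: two pages stay mapped, six come back here. */
		printf("vmemmap pages: %lu, to reallocate: %lu\n",
		       vmemmap_pages, vmemmap_pages - reserved);
		return 0;
	}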

Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
 mm/hugetlb.c         |   2 ++
 mm/hugetlb_vmemmap.c | 100 +++++++++++++++++++++++++++++++++++++++++++++++++++
 mm/hugetlb_vmemmap.h |   5 +++
 3 files changed, 107 insertions(+)

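A note for readers before the diff (not part of the patch): the heart of
the change is the remap loop. This simplified sketch of
__remap_huge_page_pte_vmemmap() below, with the cache flush, page
preparation, and sanity checks omitted, shows what happens to each
vmemmap slot that was freed earlier: a freshly allocated page is seeded
with the contents of the shared "reuse" page, and the kernel page table
entry is then pointed back at a dedicated page:

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		/* Take one page preallocated with GFP_VMEMMAP_PAGE. */
		struct page *page = list_first_entry(remap_pages,
						     struct page, lru);

		list_del(&page->lru);
		/* Seed the new vmemmap page from the shared one... */
		copy_page(page_to_virt(page), page_to_virt(reuse));
		/* ...and map this vmemmap address to its own page again. */
		set_pte_at(&init_mm, addr, ptep++, mk_pte(page, PAGE_KERNEL));
	}
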
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4aabf12aca9b..ba927ae7f9bd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1382,6 +1382,8 @@ static void __free_hugepage(struct hstate *h, struct page *page)
 {
 	int i;
 
+	alloc_huge_page_vmemmap(h, page);
+
 	for (i = 0; i < pages_per_huge_page(h); i++) {
 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
 				1 << PG_referenced | 1 << PG_dirty |
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index e6fca02b57b2..9918dc63c062 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -89,6 +89,8 @@
 #define RESERVE_VMEMMAP_NR	2U
 #define RESERVE_VMEMMAP_SIZE	(RESERVE_VMEMMAP_NR << PAGE_SHIFT)
 #define TAIL_PAGE_REUSE		-1
+#define GFP_VMEMMAP_PAGE	\
+	(GFP_KERNEL | __GFP_NOFAIL | __GFP_MEMALLOC)
 
 #ifndef VMEMMAP_HPAGE_SHIFT
 #define VMEMMAP_HPAGE_SHIFT	HPAGE_SHIFT
@@ -219,6 +221,104 @@ static inline int freed_vmemmap_hpage_dec(struct page *page)
 	return atomic_dec_return_relaxed(&page->_mapcount) + 1;
 }
 
+static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
+					  unsigned long start,
+					  unsigned long end,
+					  struct list_head *remap_pages)
+{
+	pgprot_t pgprot = PAGE_KERNEL;
+	void *from = page_to_virt(reuse);
+	unsigned long addr;
+
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		void *to;
+		struct page *page;
+		pte_t entry, old = *ptep;
+
+		page = list_first_entry(remap_pages, struct page, lru);
+		list_del(&page->lru);
+		to = page_to_virt(page);
+		copy_page(to, from);
+
+		/*
+		 * Make sure that any data written to @to is made visible to
+		 * the physical page.
+		 */
+		flush_kernel_vmap_range(to, PAGE_SIZE);
+
+		prepare_vmemmap_page(page);
+
+		entry = mk_pte(page, pgprot);
+		set_pte_at(&init_mm, addr, ptep++, entry);
+
+		VM_BUG_ON(!pte_present(old) || pte_page(old) != reuse);
+	}
+}
+
+static void __remap_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
+					  unsigned long addr,
+					  struct list_head *remap_pages)
+{
+	unsigned long next;
+	unsigned long start = addr + RESERVE_VMEMMAP_SIZE;
+	unsigned long end = addr + vmemmap_pages_size_per_hpage(h);
+	struct page *reuse = NULL;
+
+	addr = start;
+	do {
+		pte_t *ptep;
+
+		ptep = pte_offset_kernel(pmd, addr);
+		if (!reuse)
+			reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
+
+		next = vmemmap_hpage_addr_end(addr, end);
+		__remap_huge_page_pte_vmemmap(reuse, ptep, addr, next,
+					      remap_pages);
+	} while (pmd++, addr = next, addr != end);
+
+	flush_tlb_kernel_range(start, end);
+}
+
+static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
+{
+	int i;
+
+	for (i = 0; i < free_vmemmap_pages_per_hpage(h); i++) {
+		struct page *page;
+
+		/* This should not fail: GFP_VMEMMAP_PAGE has __GFP_NOFAIL. */
+		page = alloc_page(GFP_VMEMMAP_PAGE);
+		list_add_tail(&page->lru, list);
+	}
+}
+
+void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+	pmd_t *pmd;
+	spinlock_t *ptl;
+	LIST_HEAD(remap_pages);
+
+	if (!free_vmemmap_pages_per_hpage(h))
+		return;
+
+	alloc_vmemmap_pages(h, &remap_pages);
+
+	pmd = vmemmap_to_pmd((unsigned long)head);
+	BUG_ON(!pmd);
+
+	ptl = vmemmap_pmd_lock(pmd);
+	__remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head,
+				      &remap_pages);
+	if (!freed_vmemmap_hpage_dec(pmd_page(*pmd))) {
+		/*
+		 * TODO: merge the vmemmap PTEs back into a huge PMD
+		 * if the PMD mapping was previously split.
+		 */
+	}
+	spin_unlock(ptl);
+}
+
 static inline void free_vmemmap_page_list(struct list_head *list)
 {
 	struct page *page, *next;
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index a23fb1375859..a5054f310528 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -15,6 +15,7 @@
 void __init hugetlb_vmemmap_init(struct hstate *h);
 int vmemmap_pgtable_prealloc(struct hstate *h, struct page *page);
 void vmemmap_pgtable_free(struct page *page);
+void alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
 void free_huge_page_vmemmap(struct hstate *h, struct page *head);
 
 static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
@@ -35,6 +36,10 @@ static inline void vmemmap_pgtable_free(struct page *page)
 {
 }
 
+static inline void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+}
+
 static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 }
--
2.11.0