Message-Id: <20201120064325.34492-9-songmuchun@bytedance.com>
Date: Fri, 20 Nov 2020 14:43:12 +0800
From: Muchun Song <songmuchun@...edance.com>
To: corbet@....net, mike.kravetz@...cle.com, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, x86@...nel.org, hpa@...or.com,
dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
viro@...iv.linux.org.uk, akpm@...ux-foundation.org,
paulmck@...nel.org, mchehab+huawei@...nel.org,
pawan.kumar.gupta@...ux.intel.com, rdunlap@...radead.org,
oneukum@...e.com, anshuman.khandual@....com, jroedel@...e.de,
almasrymina@...gle.com, rientjes@...gle.com, willy@...radead.org,
osalvador@...e.de, mhocko@...e.com, song.bao.hua@...ilicon.com
Cc: duanxiongchun@...edance.com, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org,
Muchun Song <songmuchun@...edance.com>
Subject: [PATCH v5 08/21] mm/hugetlb: Initialize page table lock for vmemmap

In a later patch, we will use the vmemmap page table lock to guard
the splitting of the vmemmap PMD, so initialize the lock here.

Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
mm/hugetlb_vmemmap.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 69 insertions(+)
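
Not part of the patch, for review context only: a minimal sketch of how
a later patch in the series can take the lock initialized here while
splitting a vmemmap PMD. The helper name split_vmemmap_pmd() and its
arguments are illustrative, not the actual code from the series:

	/*
	 * Sketch only: replace a huge vmemmap PMD mapping with the
	 * prepared PTE page table @pgtable, serialized by the split
	 * page table lock that this patch initializes.
	 */
	static void split_vmemmap_pmd(pmd_t *pmd, pte_t *pgtable,
				      unsigned long start)
	{
		/*
		 * pmd_lock() hands back the ptl that pmd_ptlock_init()
		 * set up on the PMD page table page.
		 */
		spinlock_t *ptl = pmd_lock(&init_mm, pmd);

		/* ... fill @pgtable with PTEs mirroring the old mapping ... */
		pmd_populate_kernel(&init_mm, pmd, pgtable);
		flush_tlb_kernel_range(start, start + PMD_SIZE);

		spin_unlock(ptl);
	}
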
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index ec70980000d8..bc8546df4a51 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -99,6 +99,8 @@
  */
 #define pr_fmt(fmt) "HugeTLB Vmemmap: " fmt
 
+#include <linux/pagewalk.h>
+#include <linux/mmzone.h>
 #include <linux/list.h>
 #include <asm/pgalloc.h>
 #include "hugetlb_vmemmap.h"
@@ -208,3 +210,70 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
 	pr_debug("can free %d vmemmap pages for %s\n", h->nr_free_vmemmap_pages,
 		 h->name);
 }
+
+static int __init vmemmap_pud_entry(pud_t *pud, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ struct page *page = pud_page(*pud);
+
+ /*
+ * page->private shares storage with page->ptl, so make sure
+ * that PG_private is not set and initialize page->private to
+ * zero.
+ */
+ VM_BUG_ON_PAGE(PagePrivate(page), page);
+ set_page_private(page, 0);
+
+ BUG_ON(!pmd_ptlock_init(page));
+
+ return 0;
+}
+
+static void __init vmemmap_ptlock_init_section(unsigned long start_pfn)
+{
+ unsigned long section_nr;
+ struct mem_section *ms;
+ struct page *memmap, *memmap_end;
+ struct mm_struct *mm = &init_mm;
+
+ const struct mm_walk_ops ops = {
+ .pud_entry = vmemmap_pud_entry,
+ };
+
+ section_nr = pfn_to_section_nr(start_pfn);
+ ms = __nr_to_section(section_nr);
+ memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+ memmap_end = memmap + PAGES_PER_SECTION;
+
+ mmap_read_lock(mm);
+ BUG_ON(walk_page_range_novma(mm, (unsigned long)memmap,
+ (unsigned long)memmap_end,
+ &ops, NULL, NULL));
+ mmap_read_unlock(mm);
+}
+
+static void __init vmemmap_ptlock_init_node(int nid)
+{
+ unsigned long pfn, end_pfn;
+ struct pglist_data *pgdat = NODE_DATA(nid);
+
+ pfn = pgdat->node_start_pfn;
+ end_pfn = pgdat_end_pfn(pgdat);
+
+ for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
+ vmemmap_ptlock_init_section(pfn);
+}
+
+static int __init vmemmap_ptlock_init(void)
+{
+ int nid;
+
+ if (!hugepages_supported())
+ return 0;
+
+ for_each_online_node(nid)
+ vmemmap_ptlock_init_node(nid);
+
+ return 0;
+}
+core_initcall(vmemmap_ptlock_init);
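
A note on the BUG_ON(!pmd_ptlock_init(page)) above: pmd_ptlock_init()
can only fail when the split page table lock does not fit into struct
page and must be allocated separately (ALLOC_SPLIT_PTLOCKS, e.g. with
lockdep enabled). Roughly, paraphrased from include/linux/mm.h of this
era (not part of this patch; details vary by version and config):

	static inline bool pmd_ptlock_init(struct page *page)
	{
	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		page->pmd_huge_pte = NULL;	/* THP pgtable deposit list */
	#endif
		return ptlock_init(page);	/* may allocate the spinlock */
	}

Since vmemmap_ptlock_init() runs once from a core_initcall() at boot,
there is no reasonable way to recover from that allocation failing,
hence the BUG_ON().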
--
2.11.0