lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20201124095259.58755-12-songmuchun@bytedance.com>
Date:   Tue, 24 Nov 2020 17:52:54 +0800
From:   Muchun Song <songmuchun@...edance.com>
To:     corbet@....net, mike.kravetz@...cle.com, tglx@...utronix.de,
        mingo@...hat.com, bp@...en8.de, x86@...nel.org, hpa@...or.com,
        dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
        viro@...iv.linux.org.uk, akpm@...ux-foundation.org,
        paulmck@...nel.org, mchehab+huawei@...nel.org,
        pawan.kumar.gupta@...ux.intel.com, rdunlap@...radead.org,
        oneukum@...e.com, anshuman.khandual@....com, jroedel@...e.de,
        almasrymina@...gle.com, rientjes@...gle.com, willy@...radead.org,
        osalvador@...e.de, mhocko@...e.com, song.bao.hua@...ilicon.com
Cc:     duanxiongchun@...edance.com, linux-doc@...r.kernel.org,
        linux-kernel@...r.kernel.org, linux-mm@...ck.org,
        linux-fsdevel@...r.kernel.org,
        Muchun Song <songmuchun@...edance.com>
Subject: [PATCH v6 11/16] mm/hugetlb: Introduce remap_huge_page_pmd_vmemmap helper

The __free_huge_page_pmd_vmemmap and __remap_huge_page_pmd_vmemmap functions
are almost the same code. So introduce a remap_huge_page_pmd_vmemmap
helper to simplify the code.

Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
 mm/hugetlb_vmemmap.c | 87 +++++++++++++++++++++-------------------------------
 1 file changed, 35 insertions(+), 52 deletions(-)

diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index d6a1b06c1322..509ca451e232 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -127,6 +127,10 @@
 	(__boundary - 1 < (end) - 1) ? __boundary : (end);		 \
 })
 
+typedef void (*vmemmap_pte_remap_func_t)(struct page *reuse, pte_t *ptep,
+					 unsigned long start, unsigned long end,
+					 void *priv);
+
 static inline unsigned int vmemmap_pages_per_hpage(struct hstate *h)
 {
 	return free_vmemmap_pages_per_hpage(h) + RESERVE_VMEMMAP_NR;
@@ -162,21 +166,42 @@ static pmd_t *vmemmap_to_pmd(unsigned long page)
 	return pmd_offset(pud, page);
 }
 
+static void remap_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
+					unsigned long end,
+					vmemmap_pte_remap_func_t fn, void *priv)
+{
+	unsigned long next, addr = start;
+	struct page *reuse = NULL;
+
+	do {
+		pte_t *ptep;
+
+		ptep = pte_offset_kernel(pmd, addr);
+		if (!reuse)
+			reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
+
+		next = vmemmap_hpage_addr_end(addr, end);
+		fn(reuse, ptep, addr, next, priv);
+	} while (pmd++, addr = next, addr != end);
+
+	flush_tlb_kernel_range(start, end);
+}
+
 static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
 					  unsigned long start,
-					  unsigned long end,
-					  struct list_head *remap_pages)
+					  unsigned long end, void *priv)
 {
 	pgprot_t pgprot = PAGE_KERNEL;
 	void *from = page_to_virt(reuse);
 	unsigned long addr;
+	struct list_head *pages = priv;
 
 	for (addr = start; addr < end; addr += PAGE_SIZE) {
 		void *to;
 		struct page *page;
 		pte_t entry, old = *ptep;
 
-		page = list_first_entry(remap_pages, struct page, lru);
+		page = list_first_entry(pages, struct page, lru);
 		list_del(&page->lru);
 		to = page_to_virt(page);
 		copy_page(to, from);
@@ -196,28 +221,6 @@ static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
 	}
 }
 
-static void __remap_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
-					  unsigned long end,
-					  struct list_head *vmemmap_pages)
-{
-	unsigned long next, addr = start;
-	struct page *reuse = NULL;
-
-	do {
-		pte_t *ptep;
-
-		ptep = pte_offset_kernel(pmd, addr);
-		if (!reuse)
-			reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
-
-		next = vmemmap_hpage_addr_end(addr, end);
-		__remap_huge_page_pte_vmemmap(reuse, ptep, addr, next,
-					      vmemmap_pages);
-	} while (pmd++, addr = next, addr != end);
-
-	flush_tlb_kernel_range(start, end);
-}
-
 static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
 {
 	unsigned int nr = free_vmemmap_pages_per_hpage(h);
@@ -258,7 +261,8 @@ void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
 
 	start = vmemmap_addr + RESERVE_VMEMMAP_SIZE;
 	end = vmemmap_addr + vmemmap_pages_size_per_hpage(h);
-	__remap_huge_page_pmd_vmemmap(pmd, start, end, &map_pages);
+	remap_huge_page_pmd_vmemmap(pmd, start, end,
+				    __remap_huge_page_pte_vmemmap, &map_pages);
 }
 
 static inline void free_vmemmap_page_list(struct list_head *list)
@@ -273,13 +277,13 @@ static inline void free_vmemmap_page_list(struct list_head *list)
 
 static void __free_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
 					 unsigned long start,
-					 unsigned long end,
-					 struct list_head *free_pages)
+					 unsigned long end, void *priv)
 {
 	/* Make the tail pages are mapped read-only. */
 	pgprot_t pgprot = PAGE_KERNEL_RO;
 	pte_t entry = mk_pte(reuse, pgprot);
 	unsigned long addr;
+	struct list_head *pages = priv;
 
 	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
 		struct page *page;
@@ -287,34 +291,12 @@ static void __free_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
 
 		VM_WARN_ON(!pte_present(old));
 		page = pte_page(old);
-		list_add(&page->lru, free_pages);
+		list_add(&page->lru, pages);
 
 		set_pte_at(&init_mm, addr, ptep, entry);
 	}
 }
 
-static void __free_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
-					 unsigned long end,
-					 struct list_head *vmemmap_pages)
-{
-	unsigned long next, addr = start;
-	struct page *reuse = NULL;
-
-	do {
-		pte_t *ptep;
-
-		ptep = pte_offset_kernel(pmd, addr);
-		if (!reuse)
-			reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
-
-		next = vmemmap_hpage_addr_end(addr, end);
-		__free_huge_page_pte_vmemmap(reuse, ptep, addr, next,
-					     vmemmap_pages);
-	} while (pmd++, addr = next, addr != end);
-
-	flush_tlb_kernel_range(start, end);
-}
-
 void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 	pmd_t *pmd;
@@ -330,7 +312,8 @@ void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 
 	start = vmemmap_addr + RESERVE_VMEMMAP_SIZE;
 	end = vmemmap_addr + vmemmap_pages_size_per_hpage(h);
-	__free_huge_page_pmd_vmemmap(pmd, start, end, &free_pages);
+	remap_huge_page_pmd_vmemmap(pmd, start, end,
+				    __free_huge_page_pte_vmemmap, &free_pages);
 	free_vmemmap_page_list(&free_pages);
 }
 
-- 
2.11.0

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ