lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230727204624.1942372-7-usama.arif@bytedance.com>
Date:   Thu, 27 Jul 2023 21:46:24 +0100
From:   Usama Arif <usama.arif@...edance.com>
To:     linux-mm@...ck.org, muchun.song@...ux.dev, mike.kravetz@...cle.com,
        rppt@...nel.org
Cc:     linux-kernel@...r.kernel.org, fam.zheng@...edance.com,
        liangma@...ngbit.com, simon.evans@...edance.com,
        punit.agrawal@...edance.com, Usama Arif <usama.arif@...edance.com>
Subject: [v1 6/6] mm: hugetlb: Skip initialization of struct pages freed later by HVO

Skipping initialization is done by marking the region with the
MEMBLOCK_RSRV_NOINIT flag. If the region is reserved for hugepages
and HVO is enabled, the struct pages that will later be freed by HVO
do not need to be initialized. This can save significant time when a
large number of hugepages are allocated at boot time. The first
HUGETLB_VMEMMAP_RESERVE_SIZE struct pages of each hugepage still need
to be initialized.

Signed-off-by: Usama Arif <usama.arif@...edance.com>
---
 mm/hugetlb.c         | 21 +++++++++++++++++++++
 mm/hugetlb_vmemmap.c |  2 +-
 mm/hugetlb_vmemmap.h |  3 +++
 3 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c1fcf2af591a..bb2b12f41026 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3166,6 +3166,9 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
 {
 	struct huge_bootmem_page *m = NULL; /* initialize for clang */
 	int nr_nodes, node;
+	phys_addr_t hugetlb_vmemmap_reserve_size =
+		HUGETLB_VMEMMAP_RESERVE_SIZE * sizeof(struct page);
+	phys_addr_t noinit_base;
 
 	/* do node specific alloc */
 	if (nid != NUMA_NO_NODE) {
@@ -3173,6 +3176,15 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
 				0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 		if (!m)
 			return 0;
+
+		if (vmemmap_optimize_enabled && hugetlb_vmemmap_optimizable(h)) {
+			noinit_base = virt_to_phys(
+				(void *)((phys_addr_t) m + hugetlb_vmemmap_reserve_size));
+			memblock_rsrv_mark_noinit(
+				noinit_base,
+				huge_page_size(h) - hugetlb_vmemmap_reserve_size);
+		}
+
 		goto found;
 	}
 	/* allocate from next node when distributing huge pages */
@@ -3187,6 +3199,15 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
 		 */
 		if (!m)
 			return 0;
+
+		if (vmemmap_optimize_enabled && hugetlb_vmemmap_optimizable(h)) {
+			noinit_base = virt_to_phys(
+				(void *)((phys_addr_t) m + hugetlb_vmemmap_reserve_size));
+			memblock_rsrv_mark_noinit(
+				noinit_base,
+				huge_page_size(h) - hugetlb_vmemmap_reserve_size);
+		}
+
 		goto found;
 	}
 
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index bdf750a4786b..b5b7834e0f42 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -443,7 +443,7 @@ static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
 DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
 EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
 
-static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
+bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
 core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
 
 /**
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 07555d2dc0cb..cb5171abe683 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -64,4 +64,7 @@ static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
 {
 	return hugetlb_vmemmap_optimizable_size(h) != 0;
 }
+
+extern bool vmemmap_optimize_enabled;
+
 #endif /* _LINUX_HUGETLB_VMEMMAP_H */
-- 
2.25.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ