Message-Id: <20230922070923.355656-4-yajun.deng@linux.dev>
Date:   Fri, 22 Sep 2023 15:09:22 +0800
From:   Yajun Deng <yajun.deng@...ux.dev>
To:     akpm@...ux-foundation.org, mike.kravetz@...cle.com,
        muchun.song@...ux.dev, glider@...gle.com, elver@...gle.com,
        dvyukov@...gle.com, rppt@...nel.org, david@...hat.com,
        osalvador@...e.de
Cc:     linux-mm@...ck.org, linux-kernel@...r.kernel.org,
        kasan-dev@...glegroups.com, Yajun Deng <yajun.deng@...ux.dev>
Subject: [PATCH 3/4] mm: Set page count and mark page reserved in reserve_bootmem_region

memmap_init_range() sets the page count of all pages, but the count of
every free page is then reset to zero in __free_pages_core(). These two
are opposite operations, and doing both is unnecessary and
time-consuming in the MEMINIT_EARLY context.
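
For context, the redundant pair looks roughly like this (a simplified
sketch of the pre-patch behaviour in mm/mm_init.c and mm/page_alloc.c,
not a verbatim excerpt):

  /* memmap_init_range() -> __init_single_page(): every page starts
   * with refcount 1, as if it were allocated.
   */
  init_page_count(page);		/* refcount = 1 */

  /* ...later, __free_pages_core() walks the same pages and undoes it: */
  for (loop = 0; loop < nr_pages; loop++, p++) {
  	__ClearPageReserved(p);
  	set_page_count(p, 0);		/* refcount back to 0 */
  }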

Set the page count and mark the page reserved in
reserve_bootmem_region() when in the MEMINIT_EARLY context, and change
the context passed by __free_pages_memory() from MEMINIT_LATE to
MEMINIT_EARLY.
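
Schematically, the flow after this patch becomes (an illustrative
sketch; the two boolean arguments match the call sites in the diff
below, though their names inside __init_single_page() are an
assumption):

  /* Boot (MEMINIT_EARLY): free pages get refcount 0 and no
   * PG_reserved, so __free_pages_core() has nothing left to undo.
   */
  __init_single_page(page, pfn, zone, nid, false, false);

  /* Only the memblock-reserved ranges are fixed up afterwards, in
   * reserve_bootmem_region():
   */
  init_page_count(page);	/* refcount = 1 */
  __SetPageReserved(page);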

At the same time, initializing the list head in reserve_bootmem_region()
isn't needed, as it is already done in __init_single_page().
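
For reference, a plausible shape of __init_single_page() after the
earlier patches in this series (the set_count/set_reserved parameter
names are an assumption; the unconditional INIT_LIST_HEAD() is what
makes the one in reserve_bootmem_region() redundant):

  static void __meminit __init_single_page(struct page *page,
  					   unsigned long pfn,
  					   unsigned long zone, int nid,
  					   bool set_count, bool set_reserved)
  {
  	mm_zero_struct_page(page);
  	set_page_links(page, zone, nid, pfn);
  	if (set_count)
  		init_page_count(page);	/* refcount = 1 */
  	if (set_reserved)
  		__SetPageReserved(page);
  	page_mapcount_reset(page);
  	INIT_LIST_HEAD(&page->lru);	/* list head already initialized here */
  	/* ... remainder of the initialization unchanged ... */
  }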

The following timings were measured on an x86 machine with 190GB of RAM.

before:
free_low_memory_core_early()	342ms

after:
free_low_memory_core_early()	286ms

Signed-off-by: Yajun Deng <yajun.deng@...ux.dev>
---
 mm/memblock.c   |  2 +-
 mm/mm_init.c    | 20 ++++++++++++++------
 mm/page_alloc.c |  8 +++++---
 3 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/mm/memblock.c b/mm/memblock.c
index a32364366bb2..9276f1819982 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -2089,7 +2089,7 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
 		while (start + (1UL << order) > end)
 			order--;
 
-		memblock_free_pages(start, order, MEMINIT_LATE);
+		memblock_free_pages(start, order, MEMINIT_EARLY);
 
 		start += (1UL << order);
 	}
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 0a4437aae30d..1cc310f706a9 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -718,7 +718,7 @@ static void __meminit init_reserved_page(unsigned long pfn, int nid)
 		if (zone_spans_pfn(zone, pfn))
 			break;
 	}
-	__init_single_page(pfn_to_page(pfn), pfn, zid, nid, true, false);
+	__init_single_page(pfn_to_page(pfn), pfn, zid, nid, false, false);
 }
 #else
 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
@@ -756,8 +756,8 @@ void __meminit reserve_bootmem_region(phys_addr_t start,
 
 			init_reserved_page(start_pfn, nid);
 
-			/* Avoid false-positive PageTail() */
-			INIT_LIST_HEAD(&page->lru);
+			/* Set the page count for the reserved region */
+			init_page_count(page);
 
 			/*
 			 * no need for atomic set_bit because the struct
@@ -888,9 +888,17 @@ void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone
 		}
 
 		page = pfn_to_page(pfn);
-		__init_single_page(page, pfn, zone, nid, true, false);
-		if (context == MEMINIT_HOTPLUG)
-			__SetPageReserved(page);
+
+		/* If the context is MEMINIT_EARLY, we set the page count and
+		 * mark the page reserved in reserve_bootmem_region(); free
+		 * regions get neither a page count nor the reserved flag, so
+		 * we don't need to reset the page count and clear the
+		 * reserved flag in __free_pages_core().
+		 */
+		if (context == MEMINIT_EARLY)
+			__init_single_page(page, pfn, zone, nid, false, false);
+		else
+			__init_single_page(page, pfn, zone, nid, true, true);
 
 		/*
 		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6c4f4531bee0..6ac58c5f3b00 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1285,9 +1285,11 @@ void __free_pages_core(struct page *page, unsigned int order, enum meminit_conte
 	unsigned int loop;
 
 	/*
-	 * When initializing the memmap, __init_single_page() sets the refcount
-	 * of all pages to 1 ("allocated"/"not free"). We have to set the
-	 * refcount of all involved pages to 0.
+	 * When initializing the memmap, memmap_init_range() sets the refcount
+	 * of all pages to 1 ("allocated"/"not free") in the hotplug context,
+	 * so we have to set the refcount of all involved pages to 0 here. In
+	 * the early context we skip this, as reserve_bootmem_region() only
+	 * sets the refcount of the reserved regions ("allocated").
 	 */
 	if (context != MEMINIT_EARLY) {
 		prefetchw(p);
-- 
2.25.1
