Message-ID: <20250411053745.1817356-4-changyuanl@google.com>
Date: Thu, 10 Apr 2025 22:37:34 -0700
From: Changyuan Lyu <changyuanl@...gle.com>
To: linux-kernel@...r.kernel.org
Cc: akpm@...ux-foundation.org, anthony.yznaga@...cle.com, arnd@...db.de,
ashish.kalra@....com, benh@...nel.crashing.org, bp@...en8.de,
catalin.marinas@....com, corbet@....net, dave.hansen@...ux.intel.com,
devicetree@...r.kernel.org, dwmw2@...radead.org, ebiederm@...ssion.com,
graf@...zon.com, hpa@...or.com, jgowans@...zon.com, kexec@...ts.infradead.org,
krzk@...nel.org, linux-arm-kernel@...ts.infradead.org,
linux-doc@...r.kernel.org, linux-mm@...ck.org, luto@...nel.org,
mark.rutland@....com, mingo@...hat.com, pasha.tatashin@...een.com,
pbonzini@...hat.com, peterz@...radead.org, ptyadav@...zon.de, robh@...nel.org,
rostedt@...dmis.org, rppt@...nel.org, saravanak@...gle.com,
skinsburskii@...ux.microsoft.com, tglx@...utronix.de, thomas.lendacky@....com,
will@...nel.org, x86@...nel.org, Changyuan Lyu <changyuanl@...gle.com>
Subject: [PATCH v6 03/14] memblock: introduce memmap_init_kho_scratch()
From: "Mike Rapoport (Microsoft)" <rppt@...nel.org>
With deferred initialization of struct page it will be necessary to
initialize the memory map for KHO scratch regions early.

Add a memmap_init_kho_scratch_pages() helper that will allow such
initialization in upcoming patches.
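
For illustration, a minimal sketch of how a later patch in the series might
call the new helper early during boot, before the deferred struct-page
initialization threads run; the surrounding function and its name are
assumptions for this example, not part of this patch:

  #include <linux/init.h>
  #include <linux/memblock.h>

  /*
   * Hypothetical early-boot caller (illustrative only): by this point the
   * KHO scratch regions have been marked MEMBLOCK_KHO_SCRATCH, and the
   * deferred struct-page init threads have not run yet.
   */
  static void __init kho_populate_scratch_memmap(void)
  {
          /*
           * Compiles to an empty stub without CONFIG_MEMBLOCK_KHO_SCRATCH,
           * and returns early when CONFIG_DEFERRED_STRUCT_PAGE_INIT is off,
           * because the memory map is then initialized eagerly anyway.
           */
          memmap_init_kho_scratch_pages();
  }
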
Signed-off-by: Mike Rapoport (Microsoft) <rppt@...nel.org>
Signed-off-by: Changyuan Lyu <changyuanl@...gle.com>
---
 include/linux/memblock.h |  2 ++
 mm/internal.h            |  2 ++
 mm/memblock.c            | 22 ++++++++++++++++++++++
 mm/mm_init.c             | 11 ++++++++---
 4 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 993937a6b9620..bb19a25342246 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -635,9 +635,11 @@ static inline void memtest_report_meminfo(struct seq_file *m) { }
 #ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
 void memblock_set_kho_scratch_only(void);
 void memblock_clear_kho_scratch_only(void);
+void memmap_init_kho_scratch_pages(void);
 #else
 static inline void memblock_set_kho_scratch_only(void) { }
 static inline void memblock_clear_kho_scratch_only(void) { }
+static inline void memmap_init_kho_scratch_pages(void) {}
 #endif
 
 #endif /* _LINUX_MEMBLOCK_H */
diff --git a/mm/internal.h b/mm/internal.h
index 50c2f590b2d04..a47e5539321b4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1121,6 +1121,8 @@ DECLARE_STATIC_KEY_TRUE(deferred_pages);
 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
+void init_deferred_page(unsigned long pfn, int nid);
+
 enum mminit_level {
 	MMINIT_WARNING,
 	MMINIT_VERIFY,
diff --git a/mm/memblock.c b/mm/memblock.c
index 3a213e2a485bc..c2633003ed8ea 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -946,6 +946,28 @@ __init_memblock void memblock_clear_kho_scratch_only(void)
 {
 	kho_scratch_only = false;
 }
+
+void __init_memblock memmap_init_kho_scratch_pages(void)
+{
+	phys_addr_t start, end;
+	unsigned long pfn;
+	int nid;
+	u64 i;
+
+	if (!IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT))
+		return;
+
+	/*
+	 * Initialize struct pages for free scratch memory.
+	 * The struct pages for reserved scratch memory will be set up in
+	 * reserve_bootmem_region()
+	 */
+	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
+			     MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) {
+		for (pfn = PFN_UP(start); pfn < PFN_DOWN(end); pfn++)
+			init_deferred_page(pfn, nid);
+	}
+}
 #endif
 
 /**
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 84f14fa12d0dd..1451cb250fd3f 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -743,7 +743,7 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 	return false;
 }
 
-static void __meminit init_deferred_page(unsigned long pfn, int nid)
+static void __meminit __init_deferred_page(unsigned long pfn, int nid)
 {
 	if (early_page_initialised(pfn, nid))
 		return;
@@ -763,11 +763,16 @@ static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 	return false;
 }
 
-static inline void init_deferred_page(unsigned long pfn, int nid)
+static inline void __init_deferred_page(unsigned long pfn, int nid)
 {
 }
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
+void __meminit init_deferred_page(unsigned long pfn, int nid)
+{
+	__init_deferred_page(pfn, nid);
+}
+
 /*
  * Initialised pages do not have PageReserved set. This function is
  * called for each range allocated by the bootmem allocator and
@@ -784,7 +789,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start,
 		if (pfn_valid(start_pfn)) {
 			struct page *page = pfn_to_page(start_pfn);
 
-			init_deferred_page(start_pfn, nid);
+			__init_deferred_page(start_pfn, nid);
 
 			/*
 			 * no need for atomic set_bit because the struct
--
2.49.0.604.gff1f9ca942-goog