Message-Id: <20240902122445.11805-1-rongqianfeng@vivo.com>
Date: Mon, 2 Sep 2024 20:24:43 +0800
From: Rong Qianfeng <rongqianfeng@...o.com>
To: vbabka@...e.cz,
mgorman@...hsingularity.net,
Andrew Morton <akpm@...ux-foundation.org>,
Mike Rapoport <rppt@...nel.org>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
Zi Yan <ziy@...dia.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Rong Qianfeng <rongqianfeng@...o.com>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Cc: opensource.kernel@...o.com
Subject: [PATCH] mm: Skip the reserved bootmem for compaction
Reserved pages are non-LRU pages, so they can't be used as migration
sources or targets; skipping them during compaction brings a small
performance benefit.

Because some drivers may also use PG_reserved, we only set
PB_migrate_skip for the clustered reserved bootmem during memory
initialization.
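For reference, compaction consumes this hint via isolation_suitable();
the check below is lightly paraphrased from mm/compaction.c:

	static inline bool isolation_suitable(struct compact_control *cc,
					      struct page *page)
	{
		/* Some callers (e.g. forced full-zone compaction) override
		 * the hint. */
		if (cc->ignore_skip_hint)
			return true;

		/* Pageblocks marked PB_migrate_skip are passed over entirely. */
		return !get_pageblock_skip(page);
	}

So any pageblock marked here is never scanned for free or movable pages
unless the caller explicitly ignores skip hints.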
Signed-off-by: Rong Qianfeng <rongqianfeng@...o.com>
---
include/linux/pageblock-flags.h | 13 +++++++++++
mm/compaction.c | 40 +++++++++++++++++++++++++++++++++
mm/mm_init.c | 14 ++++++++++++
mm/page_alloc.c | 7 ++++++
4 files changed, 74 insertions(+)
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index fc6b9c87cb0a..63c5b0c69c1a 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -86,6 +86,11 @@ void set_pfnblock_flags_mask(struct page *page,
set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \
page_to_pfn(page), \
(1 << PB_migrate_skip))
+
+extern void set_pageblock_skip_range(unsigned long start_pfn,
+ unsigned long end_pfn);
+extern void clear_pageblock_skip_range(unsigned long start_pfn,
+ unsigned long end_pfn);
#else
static inline bool get_pageblock_skip(struct page *page)
{
@@ -97,6 +102,14 @@ static inline void clear_pageblock_skip(struct page *page)
static inline void set_pageblock_skip(struct page *page)
{
}
+static inline void set_pageblock_skip_range(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+}
+static inline void clear_pageblock_skip_range(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+}
#endif /* CONFIG_COMPACTION */
#endif /* PAGEBLOCK_FLAGS_H */
diff --git a/mm/compaction.c b/mm/compaction.c
index f2af4493a878..7861588b34f3 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -286,6 +286,46 @@ static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)
}
#endif
+/*
+ * Currently used to set PB_migrate_skip for reserved bootmem, which
+ * can't be used as migration sources or targets (except CMA).
+ */
+void set_pageblock_skip_range(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ unsigned long pfn;
+
+ start_pfn = ALIGN(start_pfn, pageblock_nr_pages);
+ end_pfn = ALIGN_DOWN(end_pfn, pageblock_nr_pages);
+
+ for (pfn = start_pfn; pfn < end_pfn;
+ pfn += pageblock_nr_pages) {
+ if (pfn_valid(pfn)) {
+ struct page *page = pfn_to_page(pfn);
+
+ set_pageblock_skip(page);
+ }
+ }
+}
+
+void clear_pageblock_skip_range(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ unsigned long pfn;
+
+ start_pfn = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
+ end_pfn = ALIGN(end_pfn, pageblock_nr_pages);
+
+ for (pfn = start_pfn; pfn < end_pfn;
+ pfn += pageblock_nr_pages) {
+ if (pfn_valid(pfn)) {
+ struct page *page = pfn_to_page(pfn);
+
+ clear_pageblock_skip(page);
+ }
+ }
+}
+
/*
* Compound pages of >= pageblock_order should consistently be skipped until
* released. It is always pointless to compact pages of such order (if they are
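Note the asymmetric rounding in the two helpers above: the set path
aligns inward so a pageblock that is only partially reserved is never
marked, while the clear path aligns outward so every pageblock touching
released memory becomes compactable again. A userspace toy illustrating
the arithmetic (assuming order-9, i.e. 512-page, pageblocks; the ALIGN
macros mirror the kernel's power-of-two variants):

	#include <stdio.h>

	#define PAGEBLOCK_NR_PAGES	512UL	/* assumed: order-9 pageblocks */
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

	int main(void)
	{
		unsigned long start_pfn = 1000, end_pfn = 3000;

		/* Set path rounds inward: only fully reserved pageblocks
		 * are skipped. */
		printf("set:   [%lu, %lu)\n",
		       ALIGN(start_pfn, PAGEBLOCK_NR_PAGES),
		       ALIGN_DOWN(end_pfn, PAGEBLOCK_NR_PAGES)); /* [1024, 2560) */

		/* Clear path rounds outward: every pageblock touching the
		 * freed range is visible to compaction again. */
		printf("clear: [%lu, %lu)\n",
		       ALIGN_DOWN(start_pfn, PAGEBLOCK_NR_PAGES),
		       ALIGN(end_pfn, PAGEBLOCK_NR_PAGES)); /* [512, 3072) */
		return 0;
	}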
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 4ba5607aaf19..8b7dc8e00bf1 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -768,6 +768,13 @@ void __meminit reserve_bootmem_region(phys_addr_t start,
__SetPageReserved(page);
}
}
+
+ /*
+ * Set PB_migrate_skip for the reserved region. For CMA memory and
+ * the memory released by free_reserved_area(), PB_migrate_skip is
+ * cleared when they are initialized.
+ */
+ set_pageblock_skip_range(start_pfn, end_pfn);
}
/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
@@ -2236,6 +2243,13 @@ void __init init_cma_reserved_pageblock(struct page *page)
set_page_count(p, 0);
} while (++p, --i);
+ /*
+ * PB_migrate_skip was set in reserve_bootmem_region() for CMA
+ * memory; clear it now.
+ */
+ clear_pageblock_skip(page);
+
set_pageblock_migratetype(page, MIGRATE_CMA);
set_page_refcounted(page);
/* pages were reserved and not allocated */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b98f9bb28234..a7729dac0198 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5887,6 +5887,13 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
if (pages && s)
pr_info("Freeing %s memory: %ldK\n", s, K(pages));
+ /*
+ * Clear PB_migrate_skip now that the memory has been released
+ * to the buddy system.
+ */
+ clear_pageblock_skip_range(page_to_pfn(virt_to_page(start)),
+ page_to_pfn(virt_to_page(end)));
+
return pages;
}
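For context, a typical caller of this path is the generic
free_initmem_default() helper; a sketch modeled on it (see
include/linux/mm.h; exact arguments vary by architecture):

	extern char __init_begin[], __init_end[];

	/* Sketch: hand the kernel's .init sections back to the buddy
	 * allocator; with this patch the covering pageblocks also become
	 * eligible for compaction again. */
	free_reserved_area(&__init_begin, &__init_end,
			   POISON_FREE_INITMEM, "unused kernel image");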
--
2.39.0