[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <tencent_600D1BCC5E1B5DB4DDD98679DA7DC3123C06@qq.com>
Date: Sun, 28 Dec 2025 19:38:50 +0800
From: "shengminghu512" <shengminghu512@...com>
To: "akpm" <akpm@...ux-foundation.org>, "david" <david@...nel.org>
Cc: "lorenzo.stoakes" <lorenzo.stoakes@...cle.com>, "Liam.Howlett" <Liam.Howlett@...cle.com>, "vbabka" <vbabka@...e.cz>, "rppt" <rppt@...nel.org>, "surenb" <surenb@...gle.com>, "mhocko" <mhocko@...e.com>, "linux-mm" <linux-mm@...ck.org>, "linux-kernel" <linux-kernel@...r.kernel.org>, "hu.shengming" <hu.shengming@....com.cn>, "zhang.run" <zhang.run@....com.cn>
Subject: [PATCH] mm/memblock: drop redundant 'struct page *' argument from memblock_free_pages()
From: Shengming Hu <hu.shengming@....com.cn>
memblock_free_pages() currently takes both a struct page * and the
corresponding PFN. Every call site derives the page pointer from the
PFN via pfn_to_page(pfn), so the parameter is redundant and invites
accidental mismatches between the two arguments.
Simplify the interface by dropping the struct page * argument and
deriving the page locally from the PFN, after the deferred struct page
initialization check. Behavior is unchanged, and the helper becomes
harder to misuse.
Signed-off-by: Shengming Hu <hu.shengming@....com.cn>
---
mm/internal.h | 3 +--
mm/memblock.c | 4 ++--
mm/mm_init.c | 7 +++++--
tools/testing/memblock/internal.h | 3 +--
4 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index e430da900..5f93ee145 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -742,8 +742,7 @@ static inline void clear_zone_contiguous(struct zone *zone)
extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
int mt);
-extern void memblock_free_pages(struct page *page, unsigned long pfn,
- unsigned int order);
+extern void memblock_free_pages(unsigned long pfn, unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order,
enum meminit_context context);
diff --git a/mm/memblock.c b/mm/memblock.c
index 905d06b16..6e11f81c4 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1771,7 +1771,7 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
end = PFN_DOWN(base + size);
for (; cursor < end; cursor++) {
- memblock_free_pages(pfn_to_page(cursor), cursor, 0);
+ memblock_free_pages(cursor, 0);
totalram_pages_inc();
}
}
@@ -2216,7 +2216,7 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
while (start + (1UL << order) > end)
order--;
- memblock_free_pages(pfn_to_page(start), start, order);
+ memblock_free_pages(start, order);
start += (1UL << order);
}
diff --git a/mm/mm_init.c b/mm/mm_init.c
index fc2a6f1e5..8e95d65cf 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2480,9 +2480,10 @@ void *__init alloc_large_system_hash(const char *tablename,
return table;
}
-void __init memblock_free_pages(struct page *page, unsigned long pfn,
- unsigned int order)
+void __init memblock_free_pages(unsigned long pfn, unsigned int order)
{
+ struct page *page;
+
if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
int nid = early_pfn_to_nid(pfn);
@@ -2490,6 +2491,8 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
return;
}
+ page = pfn_to_page(pfn);
+
if (!kmsan_memblock_free_pages(page, order)) {
/* KMSAN will take care of these pages. */
return;
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
index 0ab4b53bb..009b97bbd 100644
--- a/tools/testing/memblock/internal.h
+++ b/tools/testing/memblock/internal.h
@@ -15,8 +15,7 @@ bool mirrored_kernelcore = false;
struct page {};
-void memblock_free_pages(struct page *page, unsigned long pfn,
- unsigned int order)
+void memblock_free_pages(unsigned long pfn, unsigned int order)
{
}
--
2.25.1
Powered by blists - more mailing lists