Message-ID: <20251228124001.3624742-28-rppt@kernel.org>
Date: Sun, 28 Dec 2025 14:39:57 +0200
From: Mike Rapoport <rppt@...nel.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Alex Shi <alexs@...nel.org>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Andreas Larsson <andreas@...sler.com>,
Borislav Petkov <bp@...en8.de>,
Brian Cain <bcain@...nel.org>,
"Christophe Leroy (CS GROUP)" <chleroy@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
"David S. Miller" <davem@...emloft.net>,
Dave Hansen <dave.hansen@...ux.intel.com>,
David Hildenbrand <david@...nel.org>,
Dinh Nguyen <dinguyen@...nel.org>,
Geert Uytterhoeven <geert@...ux-m68k.org>,
Guo Ren <guoren@...nel.org>,
Heiko Carstens <hca@...ux.ibm.com>,
Helge Deller <deller@....de>,
Huacai Chen <chenhuacai@...nel.org>,
Ingo Molnar <mingo@...hat.com>,
Johannes Berg <johannes@...solutions.net>,
John Paul Adrian Glaubitz <glaubitz@...sik.fu-berlin.de>,
Jonathan Corbet <corbet@....net>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Magnus Lindholm <linmag7@...il.com>,
Matt Turner <mattst88@...il.com>,
Max Filippov <jcmvbkbc@...il.com>,
Michael Ellerman <mpe@...erman.id.au>,
Michal Hocko <mhocko@...e.com>,
Michal Simek <monstr@...str.eu>,
Mike Rapoport <rppt@...nel.org>,
Muchun Song <muchun.song@...ux.dev>,
Oscar Salvador <osalvador@...e.de>,
Palmer Dabbelt <palmer@...belt.com>,
Pratyush Yadav <pratyush@...nel.org>,
Richard Weinberger <richard@....at>,
Russell King <linux@...linux.org.uk>,
Stafford Horne <shorne@...il.com>,
Suren Baghdasaryan <surenb@...gle.com>,
Thomas Bogendoerfer <tsbogend@...ha.franken.de>,
Thomas Gleixner <tglx@...utronix.de>,
Vasily Gorbik <gor@...ux.ibm.com>,
Vineet Gupta <vgupta@...nel.org>,
Vlastimil Babka <vbabka@...e.cz>,
Will Deacon <will@...nel.org>,
x86@...nel.org,
linux-alpha@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
linux-csky@...r.kernel.org,
linux-cxl@...r.kernel.org,
linux-doc@...r.kernel.org,
linux-hexagon@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-m68k@...ts.linux-m68k.org,
linux-mips@...r.kernel.org,
linux-mm@...ck.org,
linux-openrisc@...r.kernel.org,
linux-parisc@...r.kernel.org,
linux-riscv@...ts.infradead.org,
linux-s390@...r.kernel.org,
linux-sh@...r.kernel.org,
linux-snps-arc@...ts.infradead.org,
linux-um@...ts.infradead.org,
linuxppc-dev@...ts.ozlabs.org,
loongarch@...ts.linux.dev,
sparclinux@...r.kernel.org
Subject: [PATCH 27/28] mm/hugetlb: drop hugetlb_cma_check()
From: "Mike Rapoport (Microsoft)" <rppt@...nel.org>
hugetlb_cma_check() was required when the ordering of hugetlb_cma_reserve()
and hugetlb_bootmem_alloc() was architecture dependent.

Since hugetlb_cma_reserve() is always called before hugetlb_bootmem_alloc(),
there is no need to check whether hugetlb_cma_reserve() was already called.

Drop the unneeded hugetlb_cma_check() function.
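
For illustration, the ordering this relies on looks roughly like the
sketch below (the helper name is made up here; the actual common call
site is wired up by the earlier patches in this series):

	/* illustrative init path only, not the real function name */
	static void __init hugetlb_bootmem_setup(void)
	{
		/* now warns by itself if the arch provides no CMA order */
		hugetlb_cma_reserve();
		/* gigantic bootmem allocations can then use the CMA areas */
		hugetlb_bootmem_alloc();
	}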
Signed-off-by: Mike Rapoport (Microsoft) <rppt@...nel.org>
---
mm/hugetlb.c | 1 -
mm/hugetlb_cma.c | 16 +++-------------
mm/hugetlb_cma.h | 5 -----
3 files changed, 3 insertions(+), 19 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 51273baec9e5..82b322ae3fdc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4159,7 +4159,6 @@ static int __init hugetlb_init(void)
}
}
- hugetlb_cma_check();
hugetlb_init_hstates();
gather_bootmem_prealloc();
report_hugepages();
diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c
index b1eb5998282c..f5e79103e110 100644
--- a/mm/hugetlb_cma.c
+++ b/mm/hugetlb_cma.c
@@ -85,9 +85,6 @@ hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid, bool node_exact)
return m;
}
-
-static bool cma_reserve_called __initdata;
-
static int __init cmdline_parse_hugetlb_cma(char *p)
{
int nid, count = 0;
@@ -149,8 +146,10 @@ void __init hugetlb_cma_reserve(void)
return;
order = arch_hugetlb_cma_order();
- if (!order)
+ if (!order) {
+ pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
return;
+ }
/*
* HugeTLB CMA reservation is required for gigantic
@@ -159,7 +158,6 @@ void __init hugetlb_cma_reserve(void)
* breaking this assumption.
*/
VM_WARN_ON(order <= MAX_PAGE_ORDER);
- cma_reserve_called = true;
hugetlb_bootmem_set_nodes();
@@ -253,14 +251,6 @@ void __init hugetlb_cma_reserve(void)
hugetlb_cma_size = 0;
}
-void __init hugetlb_cma_check(void)
-{
- if (!hugetlb_cma_size || cma_reserve_called)
- return;
-
- pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
-}
-
bool hugetlb_cma_exclusive_alloc(void)
{
return hugetlb_cma_only;
diff --git a/mm/hugetlb_cma.h b/mm/hugetlb_cma.h
index 2c2ec8a7e134..78186839df3a 100644
--- a/mm/hugetlb_cma.h
+++ b/mm/hugetlb_cma.h
@@ -8,7 +8,6 @@ struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
int nid, nodemask_t *nodemask);
struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
bool node_exact);
-void hugetlb_cma_check(void);
bool hugetlb_cma_exclusive_alloc(void);
unsigned long hugetlb_cma_total_size(void);
void hugetlb_cma_validate_params(void);
@@ -31,10 +30,6 @@ struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
return NULL;
}
-static inline void hugetlb_cma_check(void)
-{
-}
-
static inline bool hugetlb_cma_exclusive_alloc(void)
{
return false;
--
2.51.0