lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20250103105158.1350689-3-guoweikang.kernel@gmail.com>
Date: Fri,  3 Jan 2025 18:51:58 +0800
From: Guo Weikang <guoweikang.kernel@...il.com>
To: Mike Rapoport <rppt@...nel.org>,
	Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-mm@...ck.org,
	linux-kernel@...r.kernel.org,
	Guo Weikang <guoweikang.kernel@...il.com>
Subject: [PATCH 3/3] mm/memblock: Modify the default failure behavior of memblock_alloc_low(from)

Just like memblock_alloc, the default failure behavior of memblock_alloc_low
and memblock_alloc_from is now modified to trigger a panic when allocation
fails; callers that can tolerate failure are switched to new _no_panic variants.

Signed-off-by: Guo Weikang <guoweikang.kernel@...il.com>
---
 arch/arc/mm/highmem.c       |  4 ----
 arch/csky/mm/init.c         |  5 ----
 arch/m68k/atari/stram.c     |  4 ----
 arch/m68k/mm/motorola.c     |  9 -------
 arch/mips/include/asm/dmi.h |  2 +-
 arch/mips/mm/init.c         |  5 ----
 arch/s390/kernel/setup.c    |  4 ----
 arch/s390/kernel/smp.c      |  3 ---
 arch/sparc/mm/init_64.c     | 13 ----------
 arch/um/kernel/mem.c        | 20 ----------------
 arch/xtensa/mm/mmu.c        |  4 ----
 include/linux/memblock.h    | 30 ++++++++++++-----------
 mm/memblock.c               | 47 +++++++++++++++++++++++++++++++++++++
 mm/percpu.c                 |  6 ++---
 14 files changed, 67 insertions(+), 89 deletions(-)

diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index c79912a6b196..4ed597b19388 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -53,10 +53,6 @@ static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
 	pte_t *pte_k;
 
 	pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-	if (!pte_k)
-		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
-
 	pmd_populate_kernel(&init_mm, pmd_k, pte_k);
 	return pte_k;
 }
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index bde7cabd23df..04de02a83564 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -174,11 +174,6 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-					if (!pte)
-						panic("%s: Failed to allocate %lu bytes align=%lx\n",
-						      __func__, PAGE_SIZE,
-						      PAGE_SIZE);
-
 					set_pmd(pmd, __pmd(__pa(pte)));
 					BUG_ON(pte != pte_offset_kernel(pmd, 0));
 				}
diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c
index 922e53bcb853..14f761330b29 100644
--- a/arch/m68k/atari/stram.c
+++ b/arch/m68k/atari/stram.c
@@ -96,10 +96,6 @@ void __init atari_stram_reserve_pages(void *start_mem)
 		pr_debug("atari_stram pool: kernel in ST-RAM, using alloc_bootmem!\n");
 		stram_pool.start = (resource_size_t)memblock_alloc_low(pool_size,
 								       PAGE_SIZE);
-		if (!stram_pool.start)
-			panic("%s: Failed to allocate %lu bytes align=%lx\n",
-			      __func__, pool_size, PAGE_SIZE);
-
 		stram_pool.end = stram_pool.start + pool_size - 1;
 		request_resource(&iomem_resource, &stram_pool);
 		stram_virt_offset = 0;
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index ce016ae8c972..83bbada15be2 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -227,11 +227,6 @@ static pte_t * __init kernel_page_table(void)
 
 	if (PAGE_ALIGNED(last_pte_table)) {
 		pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-		if (!pte_table) {
-			panic("%s: Failed to allocate %lu bytes align=%lx\n",
-					__func__, PAGE_SIZE, PAGE_SIZE);
-		}
-
 		clear_page(pte_table);
 		mmu_page_ctor(pte_table);
 
@@ -275,10 +270,6 @@ static pmd_t * __init kernel_ptr_table(void)
 	last_pmd_table += PTRS_PER_PMD;
 	if (PAGE_ALIGNED(last_pmd_table)) {
 		last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-		if (!last_pmd_table)
-			panic("%s: Failed to allocate %lu bytes align=%lx\n",
-			      __func__, PAGE_SIZE, PAGE_SIZE);
-
 		clear_page(last_pmd_table);
 		mmu_page_ctor(last_pmd_table);
 	}
diff --git a/arch/mips/include/asm/dmi.h b/arch/mips/include/asm/dmi.h
index dc397f630c66..9698d072cc4d 100644
--- a/arch/mips/include/asm/dmi.h
+++ b/arch/mips/include/asm/dmi.h
@@ -11,7 +11,7 @@
 #define dmi_unmap(x)			iounmap(x)
 
 /* MIPS initialize DMI scan before SLAB is ready, so we use memblock here */
-#define dmi_alloc(l)			memblock_alloc_low(l, PAGE_SIZE)
+#define dmi_alloc(l)			memblock_alloc_low_no_panic(l, PAGE_SIZE)
 
 #if defined(CONFIG_MACH_LOONGSON64)
 #define SMBIOS_ENTRY_POINT_SCAN_START	0xFFFE000
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 4583d1a2a73e..cca62f23769f 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -257,11 +257,6 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
 									   PAGE_SIZE);
-					if (!pte)
-						panic("%s: Failed to allocate %lu bytes align=%lx\n",
-						      __func__, PAGE_SIZE,
-						      PAGE_SIZE);
-
 					set_pmd(pmd, __pmd((unsigned long)pte));
 					BUG_ON(pte != pte_offset_kernel(pmd, 0));
 				}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e51426113f26..854d3744dacf 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -397,10 +397,6 @@ static void __init setup_lowcore(void)
 	 */
 	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
 	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
-	if (!lc)
-		panic("%s: Failed to allocate %zu bytes align=%zx\n",
-		      __func__, sizeof(*lc), sizeof(*lc));
-
 	lc->pcpu = (unsigned long)per_cpu_ptr(&pcpu_devices, 0);
 	lc->restart_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT;
 	lc->restart_psw.addr = __pa(restart_int_handler);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 9eb4508b4ca4..467d4f390837 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -631,9 +631,6 @@ void __init smp_save_dump_secondary_cpus(void)
 		return;
 	/* Allocate a page as dumping area for the store status sigps */
 	page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-	if (!page)
-		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
-		      PAGE_SIZE, 1UL << 31);
 
 	/* Set multi-threading state to the previous system. */
 	pcpu_set_smt(sclp.mtid_prev);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 05882bca5b73..8c813c755eb8 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1789,8 +1789,6 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 
 			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
 						  PAGE_SIZE);
-			if (!new)
-				goto err_alloc;
 			alloc_bytes += PAGE_SIZE;
 			pgd_populate(&init_mm, pgd, new);
 		}
@@ -1801,8 +1799,6 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 
 			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
 						  PAGE_SIZE);
-			if (!new)
-				goto err_alloc;
 			alloc_bytes += PAGE_SIZE;
 			p4d_populate(&init_mm, p4d, new);
 		}
@@ -1817,8 +1813,6 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 			}
 			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
 						  PAGE_SIZE);
-			if (!new)
-				goto err_alloc;
 			alloc_bytes += PAGE_SIZE;
 			pud_populate(&init_mm, pud, new);
 		}
@@ -1833,8 +1827,6 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 			}
 			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
 						  PAGE_SIZE);
-			if (!new)
-				goto err_alloc;
 			alloc_bytes += PAGE_SIZE;
 			pmd_populate_kernel(&init_mm, pmd, new);
 		}
@@ -1854,11 +1846,6 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 	}
 
 	return alloc_bytes;
-
-err_alloc:
-	panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
-	      __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
-	return -ENOMEM;
 }
 
 static void __init flush_all_kernel_tsbs(void)
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 53248ed04771..9c161fb4ed3a 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -83,10 +83,6 @@ static void __init one_page_table_init(pmd_t *pmd)
 	if (pmd_none(*pmd)) {
 		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
 							  PAGE_SIZE);
-		if (!pte)
-			panic("%s: Failed to allocate %lu bytes align=%lx\n",
-			      __func__, PAGE_SIZE, PAGE_SIZE);
-
 		set_pmd(pmd, __pmd(_KERNPG_TABLE +
 					   (unsigned long) __pa(pte)));
 		BUG_ON(pte != pte_offset_kernel(pmd, 0));
@@ -97,10 +93,6 @@ static void __init one_md_table_init(pud_t *pud)
 {
 #if CONFIG_PGTABLE_LEVELS > 2
 	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-	if (!pmd_table)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
-
 	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
 	BUG_ON(pmd_table != pmd_offset(pud, 0));
 #endif
@@ -110,10 +102,6 @@ static void __init one_ud_table_init(p4d_t *p4d)
 {
 #if CONFIG_PGTABLE_LEVELS > 3
 	pud_t *pud_table = (pud_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-	if (!pud_table)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
-
 	set_p4d(p4d, __p4d(_KERNPG_TABLE + (unsigned long) __pa(pud_table)));
 	BUG_ON(pud_table != pud_offset(p4d, 0));
 #endif
@@ -163,10 +151,6 @@ static void __init fixaddr_user_init( void)
 
 	fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
 	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
-	if (!v)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, size, PAGE_SIZE);
-
 	memcpy((void *) v , (void *) FIXADDR_USER_START, size);
 	p = __pa(v);
 	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
@@ -184,10 +168,6 @@ void __init paging_init(void)
 
 	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
 							       PAGE_SIZE);
-	if (!empty_zero_page)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
-
 	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
 	free_area_init(max_zone_pfn);
 
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 92e158c69c10..aee020c986a3 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -33,10 +33,6 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
 		 __func__, vaddr, n_pages);
 
 	pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
-	if (!pte)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
-
 	for (i = 0; i < n_pages; ++i)
 		pte_clear(NULL, 0, pte + i);
 
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index b68c141ebc44..3f940bf628a9 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -430,20 +430,22 @@ void *__memblock_alloc_panic(phys_addr_t size, phys_addr_t align,
 #define memblock_alloc_raw_no_panic(size, align)    \
 	 __memblock_alloc_panic(size, align, __func__, false, true)
 
-static inline void *memblock_alloc_from(phys_addr_t size,
-						phys_addr_t align,
-						phys_addr_t min_addr)
-{
-	return memblock_alloc_try_nid(size, align, min_addr,
-				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
-}
-
-static inline void *memblock_alloc_low(phys_addr_t size,
-					       phys_addr_t align)
-{
-	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
-				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
-}
+void *__memblock_alloc_from_panic(phys_addr_t size, phys_addr_t align,
+				  phys_addr_t min_addr, const char *func,
+				  bool should_panic);
+
+#define memblock_alloc_from(size, align, min_addr)    \
+	 __memblock_alloc_from_panic(size, align, min_addr, __func__, true)
+#define memblock_alloc_from_no_panic(size, align, min_addr)    \
+	 __memblock_alloc_from_panic(size, align, min_addr, __func__, false)
+
+void *__memblock_alloc_low_panic(phys_addr_t size, phys_addr_t align,
+				 const char *func, bool should_panic);
+
+#define memblock_alloc_low(size, align)    \
+	 __memblock_alloc_low_panic(size, align, __func__, true)
+#define memblock_alloc_low_no_panic(size, align)    \
+	 __memblock_alloc_low_panic(size, align, __func__, false)
 
 static inline void *memblock_alloc_node(phys_addr_t size,
 						phys_addr_t align, int nid)
diff --git a/mm/memblock.c b/mm/memblock.c
index 4974ae2ee5ec..22922c81ff77 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1722,6 +1722,53 @@ void *__init __memblock_alloc_panic(phys_addr_t size, phys_addr_t align,
 	return addr;
 }
 
+/**
+ * __memblock_alloc_from_panic - Try to allocate memory, optionally panic on failure
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @min_addr: the lower bound of the memory region from where the allocation
+ *	  is preferred (phys address)
+ * @func: caller func name
+ * @should_panic: whether to panic if the allocation fails
+ *
+ * If @should_panic is true, a failed allocation calls panic() with a
+ * formatted message. This function should not be used directly; use the
+ * macros memblock_alloc_from and memblock_alloc_from_no_panic instead.
+ */
+void *__init __memblock_alloc_from_panic(phys_addr_t size, phys_addr_t align,
+				    phys_addr_t min_addr, const char *func,
+				    bool should_panic)
+{
+	void *addr = memblock_alloc_try_nid(size, align, min_addr,
+				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+
+	if (unlikely(!addr && should_panic))
+		panic("%s: Failed to allocate %pap bytes\n", func, &size);
+	return addr;
+}
+
+/**
+ * __memblock_alloc_low_panic - Try to allocate memory, optionally panic on failure
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @func: caller func name
+ * @should_panic: whether to panic if the allocation fails
+ *
+ * If @should_panic is true, a failed allocation calls panic() with a
+ * formatted message. This function should not be used directly; use the
+ * macros memblock_alloc_low and memblock_alloc_low_no_panic instead.
+ */
+void *__init __memblock_alloc_low_panic(phys_addr_t size, phys_addr_t align,
+					const char *func, bool should_panic)
+{
+	void *addr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
+
+	if (unlikely(!addr && should_panic))
+		panic("%s: Failed to allocate %pap bytes\n", func, &size);
+	return addr;
+}
+
 /**
  * memblock_free_late - free pages directly to buddy allocator
  * @base: phys starting address of the  boot memory block
diff --git a/mm/percpu.c b/mm/percpu.c
index a381d626ed32..980fba4292be 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2933,7 +2933,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align,
 		node = cpu_to_nd_fn(cpu);
 
 	if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) {
-		ptr = memblock_alloc_from(size, align, goal);
+		ptr = memblock_alloc_from_no_panic(size, align, goal);
 		pr_info("cpu %d has no node %d or node-local memory\n",
 			cpu, node);
 		pr_debug("per cpu data for cpu%d %zu bytes at 0x%llx\n",
@@ -2948,7 +2948,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align,
 	}
 	return ptr;
 #else
-	return memblock_alloc_from(size, align, goal);
+	return memblock_alloc_from_no_panic(size, align, goal);
 #endif
 }
 
@@ -3318,7 +3318,7 @@ void __init setup_per_cpu_areas(void)
 
 	ai = pcpu_alloc_alloc_info(1, 1);
 	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-	if (!ai || !fc)
+	if (!ai)
 		panic("Failed to allocate memory for percpu areas.");
 	/* kmemleak tracks the percpu allocations separately */
 	kmemleak_ignore_phys(__pa(fc));
-- 
2.25.1


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ