Message-ID: <20240520175802.2002183-1-quic_obabatun@quicinc.com>
Date: Mon, 20 May 2024 10:58:02 -0700
From: Oreoluwa Babatunde <quic_obabatun@...cinc.com>
To: <ysato@...rs.sourceforge.jp>, <dalias@...c.org>,
        <glaubitz@...sik.fu-berlin.de>
CC: <linux-sh@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
        <robh+dt@...nel.org>, <kernel@...cinc.com>,
        Oreoluwa Babatunde <quic_obabatun@...cinc.com>
Subject: [PATCH v3] sh: Restructure setup code to reserve memory regions earlier

The unflatten_device_tree() function contains a call to
memblock_alloc(). This is a problem because this allocation is done
before any of the reserved memory regions are set aside in
paging_init().
As a result, memblock can unknowingly allocate memory from regions
that are meant to be reserved.

Hence, restructure the setup code to set aside the reserved memory
regions before any allocations are made through memblock.
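
To make the ordering concrete, here is a simplified sketch of the
setup_arch() call flow before and after this patch (an illustration
of the flow, not verbatim kernel code):

Before:
	setup_arch()
		unflatten_device_tree()
			memblock_alloc()	/* may land in a region that
						   is only reserved later */
		...
		paging_init()
			early_reserve_mem()
				memblock_reserve()	/* too late */

After:
	setup_arch()
		sh_mv.mv_mem_init()
		early_reserve_mem()
			memblock_reserve()	/* reserved regions set
						   aside first */
		unflatten_device_tree()
			memblock_alloc()	/* can no longer overlap them */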

Signed-off-by: Oreoluwa Babatunde <quic_obabatun@...cinc.com>
---
v3:
- Instead of moving all of paging_init(), move only the parts
  that are responsible for setting aside the reserved memory
  regions.

v2:
https://lore.kernel.org/all/20240423233150.74302-1-quic_obabatun@quicinc.com/
- Added Rob Herring's Reviewed-by.
- Cc'd Andrew Morton to assist with merging this for the sh architecture.
  Similar changes made for loongarch and openrisc in v1 have already
  been merged.

v1:
https://lore.kernel.org/all/1707524971-146908-4-git-send-email-quic_obabatun@quicinc.com/

 arch/sh/include/asm/setup.h |  1 -
 arch/sh/kernel/setup.c      | 43 +++++++++++++++++++++++++++++++++++-
 arch/sh/mm/init.c           | 44 -------------------------------------
 3 files changed, 42 insertions(+), 46 deletions(-)
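
As a quick sanity check on the first memblock_reserve() call in the
new early_reserve_mem() below, here is a worked example with
hypothetical values (assuming __MEMORY_START = 0x0c000000,
PHYSICAL_OFFSET = 0, CONFIG_ZERO_PAGE_OFFSET = 0x1000,
PAGE_SIZE = 4096, and __pa(_end) = 0x0c400234):

	zero_base = 0x0c000000 + 0x0      = 0x0c000000
	start     = zero_base + 0x1000    = 0x0c001000
	start_pfn = PFN_UP(0x0c400234)    = 0xc401
	size      = (PFN_PHYS(0xc401) + 0xfff) - 0x0c001000
	          = (0x0c401000 + 0xfff) - 0x0c001000
	          = 0x00400fff

i.e. the kernel image from the zero-page offset up to the page-rounded
end of _end is reserved, and the second memblock_reserve() then covers
the pages below CONFIG_ZERO_PAGE_OFFSET.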

diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h
index fc807011187f..5feed99b9b7a 100644
--- a/arch/sh/include/asm/setup.h
+++ b/arch/sh/include/asm/setup.h
@@ -19,7 +19,6 @@
 #define COMMAND_LINE ((char *) (PARAM+0x100))
 
 void sh_mv_setup(void);
-void check_for_initrd(void);
 void per_cpu_trap_init(void);
 
 #endif /* _SH_SETUP_H */
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 620e5cf8ae1e..f5b6078173c9 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -114,7 +114,7 @@ static int __init early_parse_mem(char *p)
 }
 early_param("mem", early_parse_mem);
 
-void __init check_for_initrd(void)
+static void __init check_for_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
 	unsigned long start, end;
@@ -172,6 +172,42 @@ void __init check_for_initrd(void)
 #endif
 }
 
+static void __init early_reserve_mem(void)
+{
+	unsigned long start_pfn;
+	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
+	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;
+
+	/*
+	 * Partially used pages are not usable - thus
+	 * we are rounding upwards:
+	 */
+	start_pfn = PFN_UP(__pa(_end));
+
+	/*
+	 * Reserve the kernel text and Reserve the bootmem bitmap. We do
+	 * this in two steps (first step was init_bootmem()), because
+	 * this catches the (definitely buggy) case of us accidentally
+	 * initializing the bootmem allocator with an invalid RAM area.
+	 */
+	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);
+
+	/*
+	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+	 */
+	if (CONFIG_ZERO_PAGE_OFFSET != 0)
+		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);
+
+	/*
+	 * Handle additional early reservations
+	 */
+	check_for_initrd();
+	reserve_crashkernel();
+
+	if (sh_mv.mv_mem_reserve)
+		sh_mv.mv_mem_reserve();
+}
+
 #ifndef CONFIG_GENERIC_CALIBRATE_DELAY
 void calibrate_delay(void)
 {
@@ -319,9 +355,14 @@ void __init setup_arch(char **cmdline_p)
 
 	sh_mv_setup();
 
+	sh_mv.mv_mem_init();
+
 	/* Let earlyprintk output early console messages */
 	sh_early_platform_driver_probe("earlyprintk", 1, 1);
 
+	/* set aside reserved memory regions */
+	early_reserve_mem();
+
 #ifdef CONFIG_OF_EARLY_FLATTREE
 #ifdef CONFIG_USE_BUILTIN_DTB
 	unflatten_and_copy_device_tree();
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index bf1b54055316..4559f5bea782 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -242,55 +242,11 @@ static void __init do_init_bootmem(void)
 	sparse_init();
 }
 
-static void __init early_reserve_mem(void)
-{
-	unsigned long start_pfn;
-	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
-	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;
-
-	/*
-	 * Partially used pages are not usable - thus
-	 * we are rounding upwards:
-	 */
-	start_pfn = PFN_UP(__pa(_end));
-
-	/*
-	 * Reserve the kernel text and Reserve the bootmem bitmap. We do
-	 * this in two steps (first step was init_bootmem()), because
-	 * this catches the (definitely buggy) case of us accidentally
-	 * initializing the bootmem allocator with an invalid RAM area.
-	 */
-	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);
-
-	/*
-	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
-	 */
-	if (CONFIG_ZERO_PAGE_OFFSET != 0)
-		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);
-
-	/*
-	 * Handle additional early reservations
-	 */
-	check_for_initrd();
-	reserve_crashkernel();
-}
-
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	unsigned long vaddr, end;
 
-	sh_mv.mv_mem_init();
-
-	early_reserve_mem();
-
-	/*
-	 * Once the early reservations are out of the way, give the
-	 * platforms a chance to kick out some memory.
-	 */
-	if (sh_mv.mv_mem_reserve)
-		sh_mv.mv_mem_reserve();
-
 	memblock_enforce_memory_limit(memory_limit);
 	memblock_allow_resize();
 
-- 
2.34.1

