Message-ID: <20230701171138.1491206-2-chenjiahao16@huawei.com>
Date: Sat, 1 Jul 2023 17:11:37 +0000
From: Chen Jiahao <chenjiahao16@...wei.com>
To: <linux-kernel@...r.kernel.org>, <linux-riscv@...ts.infradead.org>,
<kexec@...ts.infradead.org>, <linux-doc@...r.kernel.org>
CC: <paul.walmsley@...ive.com>, <palmer@...belt.com>,
<conor.dooley@...rochip.com>, <guoren@...nel.org>,
<heiko@...ech.de>, <bjorn@...osinc.com>, <alex@...ti.fr>,
<akpm@...ux-foundation.org>, <atishp@...osinc.com>,
<bhe@...hat.com>, <thunder.leizhen@...wei.com>, <horms@...nel.org>
Subject: [PATCH -next v6 1/2] riscv: kdump: Implement crashkernel=X,[high,low]
On riscv, the current crash kernel allocation logic tries to allocate
within the 32-bit addressable memory region by default; if that fails,
it retries without the 4G restriction.

To save DMA zone memory when allocating a relatively large crash kernel
region, reserving the memory top-down in high memory, without
overlapping the DMA zone, is a mature solution.

Introduce the parameter option crashkernel=X,[high,low] here. One can
reserve the crash kernel from high memory above the DMA zone range by
explicitly passing "crashkernel=X,high", or reserve a memory range
below 4G with "crashkernel=X,low".
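For example, on a machine with memory above 4G, a command line such as
the following (the sizes are purely illustrative, not mandated by this
patch):

    crashkernel=512M,high crashkernel=128M,low

requests a 512M reservation above the DMA32 limit plus an explicit 128M
low-memory reservation for swiotlb/DMA buffers, while

    crashkernel=512M,high

alone still reserves the default 128M of low memory
(DEFAULT_CRASH_KERNEL_LOW_SIZE).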
Signed-off-by: Chen Jiahao <chenjiahao16@...wei.com>
Acked-by: Guo Ren <guoren@...nel.org>
---
arch/riscv/kernel/setup.c | 5 +++
arch/riscv/mm/init.c | 84 +++++++++++++++++++++++++++++++++++----
2 files changed, 82 insertions(+), 7 deletions(-)
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 971fe776e2f8..376f5d49ce85 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -178,6 +178,11 @@ static void __init init_resources(void)
if (ret < 0)
goto error;
}
+ if (crashk_low_res.start != crashk_low_res.end) {
+ ret = add_resource(&iomem_resource, &crashk_low_res);
+ if (ret < 0)
+ goto error;
+ }
#endif
#ifdef CONFIG_CRASH_DUMP
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 4b95d8999120..eeb31c2cc843 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -1298,6 +1298,28 @@ static inline void setup_vm_final(void)
}
#endif /* CONFIG_MMU */
+/* Reserve 128M low memory by default for swiotlb buffer */
+#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
+
+static int __init reserve_crashkernel_low(unsigned long long low_size)
+{
+ unsigned long long low_base;
+
+ low_base = memblock_phys_alloc_range(low_size, PMD_SIZE, 0, dma32_phys_limit);
+ if (!low_base) {
+ pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
+ return -ENOMEM;
+ }
+
+ pr_info("crashkernel low memory reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
+ low_base, low_base + low_size, low_size >> 20);
+
+ crashk_low_res.start = low_base;
+ crashk_low_res.end = low_base + low_size - 1;
+
+ return 0;
+}
+
/*
* reserve_crashkernel() - reserves memory for crash kernel
*
@@ -1309,8 +1331,12 @@ static void __init reserve_crashkernel(void)
{
unsigned long long crash_base = 0;
unsigned long long crash_size = 0;
+ unsigned long long crash_low_size = 0;
unsigned long search_start = memblock_start_of_DRAM();
- unsigned long search_end = memblock_end_of_DRAM();
+ unsigned long search_end = (unsigned long)dma32_phys_limit;
+ char *cmdline = boot_command_line;
+ bool fixed_base = false;
+ bool high = false;
int ret = 0;
@@ -1326,14 +1352,36 @@ static void __init reserve_crashkernel(void)
return;
}
- ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
&crash_size, &crash_base);
- if (ret || !crash_size)
+ if (ret == -ENOENT) {
+ /* Fallback to crashkernel=X,[high,low] */
+ ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base);
+ if (ret || !crash_size)
+ return;
+
+ /*
+ * crashkernel=Y,low is valid only when crashkernel=X,high
+ * is passed.
+ */
+ ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
+ if (ret == -ENOENT)
+ crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
+ else if (ret)
+ return;
+
+ search_start = (unsigned long)dma32_phys_limit;
+ search_end = memblock_end_of_DRAM();
+ high = true;
+ } else if (ret || !crash_size) {
+ /* Invalid argument value specified */
return;
+ }
crash_size = PAGE_ALIGN(crash_size);
if (crash_base) {
+ fixed_base = true;
search_start = crash_base;
search_end = crash_base + crash_size;
}
@@ -1346,17 +1394,39 @@ static void __init reserve_crashkernel(void)
* swiotlb can work on the crash kernel.
*/
crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
- search_start,
- min(search_end, (unsigned long) SZ_4G));
+ search_start, search_end);
if (crash_base == 0) {
- /* Try again without restricting region to 32bit addressible memory */
+ if (fixed_base) {
+ pr_warn("crashkernel: allocating failed with given size@offset\n");
+ return;
+ }
+
+ if (high) {
+ /* Fall back to lower 4G reservation */
+ search_start = memblock_start_of_DRAM();
+ search_end = (unsigned long)dma32_phys_limit;
+ } else {
+ /* Try again above the 32-bit addressable memory region */
+ search_start = (unsigned long)dma32_phys_limit;
+ search_end = memblock_end_of_DRAM();
+ }
+
crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
- search_start, search_end);
+ search_start, search_end);
if (crash_base == 0) {
pr_warn("crashkernel: couldn't allocate %lldKB\n",
crash_size >> 10);
return;
}
+
+ if (!crash_low_size)
+ crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
+ }
+
+ if ((crash_base >= dma32_phys_limit) && crash_low_size &&
+ reserve_crashkernel_low(crash_low_size)) {
+ memblock_phys_free(crash_base, crash_size);
+ return;
}
pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
crash_base, crash_base + crash_size, crash_size >> 20);
--
2.34.1