Message-Id: <20220324070158.22969-2-jaewon31.kim@samsung.com>
Date: Thu, 24 Mar 2022 16:01:51 +0900
From: Jaewon Kim <jaewon31.kim@...sung.com>
To: rppt@...nel.org, vbabka@...e.cz, akpm@...ux-foundation.org
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
ytk.lee@...sung.com, jaewon31.kim@...il.com,
Jaewon Kim <jaewon31.kim@...sung.com>
Subject: [PATCH 1/8] memblock: introduce memsize showing reserved memory

Some memory regions can be reserved for a specific purpose. They are
usually defined through the reserved-memory node in the device tree. If a
region specifies only a size and no address (a size property without a
reg property), its address is determined at boot time.

The addresses of such regions can sometimes be found in the boot log, but
the log does not cover all of them, and the very first part of the log is
easy to miss. memblock_dump_all() shows the full memblock state, but it
does not include region names and its output is hard to summarize.

This patch introduces a debugfs node, memblock/memsize, to make reserved
memory easy to inspect.

This first patch shows only the reserved-memory regions defined in the
device tree, as in the example below; the .unusable and .reusable lines
at the bottom are totals per type. The following patches add more
information.

$ cat debugfs/memblock/memsize
0x0f9000000-0x0fb000000 0x02000000 ( 32768 KB ) map reusable linux,cma
0x0b1900000-0x0b1b00000 0x00200000 ( 2048 KB ) nomap unusable test1
0x0b0200000-0x0b0400000 0x00200000 ( 2048 KB ) map unusable test2
.unusable : 4096 KB
.reusable : 32768 KB

Signed-off-by: Jaewon Kim <jaewon31.kim@...sung.com>
---
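For illustration only (not part of this patch): other early reservations
could be tagged in the same way as the device-tree, memreserve and CMA
call sites below. The function example_reserve_vendor_carveout(), the
"vendor_fw" name and the base/size values here are hypothetical.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

/* Hypothetical sketch: called from platform early init, after memblock is up. */
static void __init example_reserve_vendor_carveout(void)
{
	phys_addr_t base = 0xb0000000;	/* made-up fixed carveout */
	phys_addr_t size = SZ_2M;

	memblock_reserve(base, size);
	/* nomap=false, reusable=false: counted under ".unusable" in memsize */
	memblock_memsize_record("vendor_fw", base, size, false, false);
}

Such a region would then show up as one more "map unusable" line in
debugfs/memblock/memsize.
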
drivers/of/fdt.c | 2 +
drivers/of/of_reserved_mem.c | 7 ++-
include/linux/memblock.h | 9 +++
kernel/dma/contiguous.c | 2 +
mm/Kconfig | 7 +++
mm/memblock.c | 103 +++++++++++++++++++++++++++++++++++
6 files changed, 129 insertions(+), 1 deletion(-)
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index ec315b060cd5..ec2f60a78f8f 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -619,6 +619,7 @@ static void __init fdt_reserve_elfcorehdr(void)
}
memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
+ memblock_memsize_record("elfcorehdr", elfcorehdr_addr, size, false, false);
pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
elfcorehdr_size >> 10, elfcorehdr_addr);
@@ -645,6 +646,7 @@ void __init early_init_fdt_scan_reserved_mem(void)
if (!size)
break;
early_init_dt_reserve_memory_arch(base, size, false);
+ memblock_memsize_record("memreserve", base, size, false, false);
}
fdt_scan_reserved_mem();
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 75caa6f5d36f..40323751efb2 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -261,9 +261,10 @@ void __init fdt_init_reserved_mem(void)
int len;
const __be32 *prop;
int err = 0;
- bool nomap;
+ bool nomap, reusable;
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+ reusable = of_get_flat_dt_prop(node, "reusable", NULL) != NULL;
prop = of_get_flat_dt_prop(node, "phandle", &len);
if (!prop)
prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
@@ -283,6 +284,10 @@ void __init fdt_init_reserved_mem(void)
else
memblock_phys_free(rmem->base,
rmem->size);
+ } else {
+ memblock_memsize_record(rmem->name, rmem->base,
+ rmem->size, nomap,
+ reusable);
}
}
}
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 50ad19662a32..468b016e179b 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -604,5 +604,14 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
}
#endif
+#ifdef CONFIG_MEMBLOCK_MEMSIZE
+extern void memblock_memsize_record(const char *name, phys_addr_t base,
+ phys_addr_t size, bool nomap,
+ bool reusable);
+#else
+static inline void memblock_memsize_record(const char *name, phys_addr_t base,
+ phys_addr_t size, bool nomap,
+ bool reusable) { }
+#endif
#endif /* _LINUX_MEMBLOCK_H */
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 6ea80ae42622..7415c1135afa 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -239,6 +239,8 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
dma_contiguous_early_fixup(cma_get_base(*res_cma),
cma_get_size(*res_cma));
+ memblock_memsize_record("dma_cma", cma_get_base(*res_cma),
+ cma_get_size(*res_cma), false, true);
return 0;
}
diff --git a/mm/Kconfig b/mm/Kconfig
index 761f5021ba51..e29f6cd8394e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -96,6 +96,13 @@ config HAVE_FAST_GUP
depends on MMU
bool
+config MEMBLOCK_MEMSIZE
+ bool "memblock based reserved memory profiling"
+ default n
+ help
+ This option adds a debugfs node, memblock/memsize, to show reserved
+ memory regions easily.
+
# Don't discard allocated memory used to track "memory" and "reserved" memblocks
# after early boot, so it can still be used to test for validity of memory.
# Also, memblocks are updated with memory hot(un)plug.
diff --git a/mm/memblock.c b/mm/memblock.c
index b12a364f2766..8492757f7192 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -19,6 +19,7 @@
#include <asm/sections.h>
#include <linux/io.h>
+#include <linux/sort.h>
#include "internal.h"
@@ -1928,6 +1929,49 @@ static int __init early_memblock(char *p)
}
early_param("memblock", early_memblock);
+#ifdef CONFIG_MEMBLOCK_MEMSIZE
+
+#define NAME_SIZE 30
+struct memsize_rgn_struct {
+ phys_addr_t base;
+ long size;
+ bool nomap; /* 1/32 byte */
+ bool reusable; /* 1/32 byte */
+ char name[NAME_SIZE]; /* 30/32 byte */
+};
+
+#define MAX_MEMSIZE_RGN 64
+static struct memsize_rgn_struct memsize_rgn[MAX_MEMSIZE_RGN] __initdata_memblock;
+static int memsize_rgn_count __initdata_memblock;
+
+void __init_memblock memblock_memsize_record(const char *name, phys_addr_t base,
+ phys_addr_t size, bool nomap, bool reusable)
+{
+ struct memsize_rgn_struct *rgn;
+ phys_addr_t end;
+
+ if (memsize_rgn_count == MAX_MEMSIZE_RGN) {
+ pr_err("not enough space on memsize_rgn\n");
+ return;
+ }
+ rgn = &memsize_rgn[memsize_rgn_count++];
+ rgn->base = base;
+ rgn->size = size;
+ rgn->nomap = nomap;
+ rgn->reusable = reusable;
+
+ if (!name) {
+ strcpy(rgn->name, "unknown");
+ } else {
+ strncpy(rgn->name, name, NAME_SIZE - 1);
+ rgn->name[NAME_SIZE - 1] = '\0';
+ }
+ end = base + size - 1;
+ memblock_dbg("%s %pa..%pa nomap:%d reusable:%d\n",
+ __func__, &base, &end, nomap, reusable);
+}
+#endif /* CONFIG_MEMBLOCK_MEMSIZE */
+
static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
struct page *start_pg, *end_pg;
@@ -2138,6 +2182,61 @@ static int memblock_debug_show(struct seq_file *m, void *private)
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);
+#ifdef CONFIG_MEMBLOCK_MEMSIZE
+
+static int memsize_rgn_cmp(const void *a, const void *b)
+{
+ const struct memsize_rgn_struct *ra = a, *rb = b;
+
+ if (ra->base > rb->base)
+ return -1;
+
+ if (ra->base < rb->base)
+ return 1;
+
+ return 0;
+}
+
+static int memblock_memsize_show(struct seq_file *m, void *private)
+{
+ int i;
+ struct memsize_rgn_struct *rgn;
+ unsigned long reserved = 0, reusable = 0;
+
+ sort(memsize_rgn, memsize_rgn_count,
+ sizeof(memsize_rgn[0]), memsize_rgn_cmp, NULL);
+ for (i = 0; i < memsize_rgn_count; i++) {
+ phys_addr_t base, end;
+ long size;
+
+ rgn = &memsize_rgn[i];
+ base = rgn->base;
+ size = rgn->size;
+ end = base + size;
+
+ seq_printf(m, "0x%09lx-0x%09lx 0x%08lx ( %7lu KB ) %s %s %s\n",
+ (unsigned long)base, (unsigned long)end,
+ size, DIV_ROUND_UP(size, SZ_1K),
+ rgn->nomap ? "nomap" : " map",
+ rgn->reusable ? "reusable" : "unusable",
+ rgn->name);
+ if (rgn->reusable)
+ reusable += (unsigned long)rgn->size;
+ else
+ reserved += (unsigned long)rgn->size;
+ }
+
+ seq_printf(m, "\n");
+ seq_printf(m, " .unusable : %7lu KB\n",
+ DIV_ROUND_UP(reserved, SZ_1K));
+ seq_printf(m, " .reusable : %7lu KB\n",
+ DIV_ROUND_UP(reusable, SZ_1K));
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(memblock_memsize);
+#endif
+
static int __init memblock_init_debugfs(void)
{
struct dentry *root = debugfs_create_dir("memblock", NULL);
@@ -2150,6 +2249,10 @@ static int __init memblock_init_debugfs(void)
debugfs_create_file("physmem", 0444, root, &physmem,
&memblock_debug_fops);
#endif
+#ifdef CONFIG_MEMBLOCK_MEMSIZE
+ debugfs_create_file("memsize", 0444, root,
+ NULL, &memblock_memsize_fops);
+#endif
return 0;
}
--
2.17.1