[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <20200721073203.107862-1-liwei213@huawei.com>
Date: Tue, 21 Jul 2020 15:32:03 +0800
From: Wei Li <liwei213@...wei.com>
To: <catalin.marinas@....com>, <will@...nel.org>
CC: <liwei213@...wei.com>, <saberlily.xia@...ilicon.com>,
<puck.chen@...ilicon.com>, <butao@...ilicon.com>,
<fengbaopeng2@...ilicon.com>, <nsaenzjulienne@...e.de>,
<steve.capper@....com>, <rppt@...ux.ibm.com>,
<song.bao.hua@...ilicon.com>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-kernel@...r.kernel.org>, <sujunfei2@...ilicon.com>
Subject: [PATCH] arm64: mm: free unused memmap for sparse memory model that defines VMEMMAP
For memory holes, the sparse memory model that defines SPARSEMEM_VMEMMAP
does not free the reserved memory for the page map; this patch does it.
Signed-off-by: Wei Li <liwei213@...wei.com>
Signed-off-by: Chen Feng <puck.chen@...ilicon.com>
Signed-off-by: Xia Qing <saberlily.xia@...ilicon.com>
---
arch/arm64/mm/init.c | 81 +++++++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 71 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 1e93cfc7c47a..d1b56b47d5ba 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -441,7 +441,48 @@ void __init bootmem_init(void)
memblock_dump_all();
}
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#define VMEMMAP_PAGE_INUSE 0xFD
+static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned long addr, end;
+ unsigned long next;
+ pmd_t *pmd;
+ void *page_addr;
+ phys_addr_t phys_addr;
+
+ addr = (unsigned long)pfn_to_page(start_pfn);
+ end = (unsigned long)pfn_to_page(end_pfn);
+
+ pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);
+ for (; addr < end; addr = next, pmd++) {
+ next = pmd_addr_end(addr, end);
+
+ if (!pmd_present(*pmd))
+ continue;
+
+ if (IS_ALIGNED(addr, PMD_SIZE) &&
+ IS_ALIGNED(next, PMD_SIZE)) {
+ phys_addr = __pfn_to_phys(pmd_pfn(*pmd));
+ free_bootmem(phys_addr, PMD_SIZE);
+ pmd_clear(pmd);
+ } else {
+ /* If here, we are freeing vmemmap pages. */
+ memset((void *)addr, VMEMMAP_PAGE_INUSE, next - addr);
+ page_addr = page_address(pmd_page(*pmd));
+
+ if (!memchr_inv(page_addr, VMEMMAP_PAGE_INUSE,
+ PMD_SIZE)) {
+ phys_addr = __pfn_to_phys(pmd_pfn(*pmd));
+ free_bootmem(phys_addr, PMD_SIZE);
+ pmd_clear(pmd);
+ }
+ }
+ }
+
+ flush_tlb_all();
+}
+#else
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
struct page *start_pg, *end_pg;
@@ -468,31 +509,53 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
memblock_free(pg, pgend - pg);
}
+#endif
+
/*
* The mem_map array can get very big. Free the unused area of the memory map.
*/
static void __init free_unused_memmap(void)
{
- unsigned long start, prev_end = 0;
+ unsigned long start, cur_start, prev_end = 0;
struct memblock_region *reg;
for_each_memblock(memory, reg) {
- start = __phys_to_pfn(reg->base);
+ cur_start = __phys_to_pfn(reg->base);
#ifdef CONFIG_SPARSEMEM
/*
* Take care not to free memmap entries that don't exist due
* to SPARSEMEM sections which aren't present.
*/
- start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
+ start = min(cur_start, ALIGN(prev_end, PAGES_PER_SECTION));
+
/*
- * If we had a previous bank, and there is a space between the
- * current bank and the previous, free it.
+		 * Free memory in the case of:
+		 * 1. if cur_start - prev_end <= PAGES_PER_SECTION,
+		 * free prev_end ~ cur_start.
+		 * 2. if cur_start - prev_end > PAGES_PER_SECTION,
+		 * free prev_end ~ ALIGN(prev_end, PAGES_PER_SECTION).
*/
if (prev_end && prev_end < start)
free_memmap(prev_end, start);
+ /*
+ * Free memory in the case of:
+ * if cur_start - prev_end > PAGES_PER_SECTION,
+ * free ALIGN_DOWN(cur_start, PAGES_PER_SECTION) ~ cur_start.
+ */
+ if (cur_start > start &&
+ !IS_ALIGNED(cur_start, PAGES_PER_SECTION))
+ free_memmap(ALIGN_DOWN(cur_start, PAGES_PER_SECTION),
+ cur_start);
+#else
+ /*
+ * If we had a previous bank, and there is a space between the
+ * current bank and the previous, free it.
+ */
+ if (prev_end && prev_end < cur_start)
+ free_memmap(prev_end, cur_start);
+#endif
/*
* Align up here since the VM subsystem insists that the
* memmap entries are valid from the bank end aligned to
@@ -507,7 +570,6 @@ static void __init free_unused_memmap(void)
free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
-#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
/*
* mem_init() marks the free areas in the mem_map and tells us how much memory
@@ -524,9 +586,8 @@ void __init mem_init(void)
set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
free_unused_memmap();
-#endif
+
/* this will put all unused low memory onto the freelists */
memblock_free_all();
--
2.15.0
Powered by blists - more mailing lists