Message-Id: <1522397755-33393-2-git-send-email-hejianet@gmail.com>
Date: Fri, 30 Mar 2018 01:15:51 -0700
From: Jia He <hejianet@...il.com>
To: Russell King <linux@...linux.org.uk>,
Andrew Morton <akpm@...ux-foundation.org>,
Michal Hocko <mhocko@...e.com>,
Catalin Marinas <catalin.marinas@....com>,
Mel Gorman <mgorman@...e.de>,
Will Deacon <will.deacon@....com>,
Mark Rutland <mark.rutland@....com>,
"H. Peter Anvin" <hpa@...or.com>
Cc: Pavel Tatashin <pasha.tatashin@...cle.com>,
Daniel Jordan <daniel.m.jordan@...cle.com>,
AKASHI Takahiro <takahiro.akashi@...aro.org>,
Gioh Kim <gi-oh.kim@...fitbricks.com>,
Steven Sistare <steven.sistare@...cle.com>,
Daniel Vacek <neelx@...hat.com>,
Eugeniu Rosca <erosca@...adit-jv.com>,
Vlastimil Babka <vbabka@...e.cz>, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, James Morse <james.morse@....com>,
Ard Biesheuvel <ard.biesheuvel@...aro.org>,
Steve Capper <steve.capper@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, x86@...nel.org,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Kate Stewart <kstewart@...uxfoundation.org>,
Philippe Ombredanne <pombredanne@...b.com>,
Johannes Weiner <hannes@...xchg.org>,
Kemi Wang <kemi.wang@...el.com>,
Petr Tesarik <ptesarik@...e.com>,
YASUAKI ISHIMATSU <yasu.isimatu@...il.com>,
Andrey Ryabinin <aryabinin@...tuozzo.com>,
Nikolay Borisov <nborisov@...e.com>, richard.weiyang@...il.com,
Jia He <hejianet@...il.com>, Jia He <jia.he@...-semitech.com>
Subject: [PATCH v4 1/5] mm: page_alloc: keep memblock_next_valid_pfn() on arm and arm64

Commit b92df1de5d28 ("mm: page_alloc: skip over regions of invalid pfns
where possible") optimized the loop in memmap_init_zone(), but it could
cause a panic on some machines, so Daniel Vacek later reverted it.

However, as suggested by Daniel Vacek, it is fine to use memblock to
skip gaps and find the next valid pfn as long as
CONFIG_HAVE_ARCH_PFN_VALID is enabled.

On arm and arm64, memblock is used by default, but the generic version
of pfn_valid() is based on mem sections. memblock_next_valid_pfn() does
not always return the next valid pfn; it can skip further, so some
valid pfns are treated as if they were invalid and their struct pages
are never initialized. That is why the kernel was eventually crashing
on some !arm machines.

As verified by Eugeniu Rosca, arm can benefit from commit b92df1de5d28.
So keep memblock_next_valid_pfn() on arm{,64} and move the related code
into the arm and arm64 arch directories.

Suggested-by: Daniel Vacek <neelx@...hat.com>
Signed-off-by: Jia He <jia.he@...-semitech.com>
---
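Note: the memblock_next_valid_pfn() lookup added below is a plain
binary search over the sorted, non-overlapping regions of
memblock.memory (the arm and arm64 copies are identical). For
reference, here is a minimal userspace sketch of the same idea;
struct region and next_valid_pfn() are hypothetical stand-ins, not
kernel API, and the regions are kept in pfn units for simplicity
(the kernel code converts with PFN_PHYS()/PHYS_PFN()):

#include <stdio.h>

/* hypothetical stand-in for struct memblock_region, in pfn units */
struct region {
	unsigned long base;	/* first pfn of the region */
	unsigned long size;	/* number of pfns in the region */
};

/*
 * Same search as memblock_next_valid_pfn(): if pfn + 1 falls inside
 * a region it is already valid; otherwise return the first pfn of
 * the next region, or -1UL when no region follows.
 */
static unsigned long next_valid_pfn(const struct region *r,
				    unsigned int cnt, unsigned long pfn)
{
	unsigned int mid, left = 0, right = cnt;

	pfn++;
	do {
		mid = (left + right) / 2;

		if (pfn < r[mid].base)
			right = mid;
		else if (pfn >= r[mid].base + r[mid].size)
			left = mid + 1;
		else
			return pfn;	/* inside a region: valid */
	} while (left < right);

	return (right == cnt) ? -1UL : r[right].base;
}

int main(void)
{
	/* two regions with a hole between pfn 0x300 and pfn 0x400 */
	struct region map[] = { { 0x100, 0x200 }, { 0x400, 0x100 } };

	/* 0x2ff + 1 = 0x300 lands in the hole, so 0x400 is printed */
	printf("0x%lx\n", next_valid_pfn(map, 2, 0x2ff));
	/* 0x150 + 1 is inside the first region, so 0x151 is printed */
	printf("0x%lx\n", next_valid_pfn(map, 2, 0x150));
	return 0;
}
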
arch/arm/mm/init.c | 31 ++++++++++++++++++++++++++++++-
arch/arm64/mm/init.c | 31 ++++++++++++++++++++++++++++++-
mm/page_alloc.c | 13 ++++++++++++-
3 files changed, 72 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index a1f11a7..0fb85ca 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -198,7 +198,36 @@ int pfn_valid(unsigned long pfn)
return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
-#endif
+
+/* HAVE_MEMBLOCK is always enabled on arm */
+unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn)
+{
+ struct memblock_type *type = &memblock.memory;
+ unsigned int right = type->cnt;
+ unsigned int mid, left = 0;
+ phys_addr_t addr = PFN_PHYS(++pfn);
+
+ do {
+ mid = (right + left) / 2;
+
+ if (addr < type->regions[mid].base)
+ right = mid;
+ else if (addr >= (type->regions[mid].base +
+ type->regions[mid].size))
+ left = mid + 1;
+ else {
+ /* addr is within the region, so pfn is valid */
+ return pfn;
+ }
+ } while (left < right);
+
+ if (right == type->cnt)
+ return -1UL;
+ else
+ return PHYS_PFN(type->regions[right].base);
+}
+EXPORT_SYMBOL(memblock_next_valid_pfn);
+#endif /* CONFIG_HAVE_ARCH_PFN_VALID */
#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 00e7b90..13e43ff 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -290,7 +290,36 @@ int pfn_valid(unsigned long pfn)
return memblock_is_map_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
-#endif
+
+/* HAVE_MEMBLOCK is always enabled on arm64 */
+unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn)
+{
+ struct memblock_type *type = &memblock.memory;
+ unsigned int right = type->cnt;
+ unsigned int mid, left = 0;
+ phys_addr_t addr = PFN_PHYS(++pfn);
+
+ do {
+ mid = (right + left) / 2;
+
+ if (addr < type->regions[mid].base)
+ right = mid;
+ else if (addr >= (type->regions[mid].base +
+ type->regions[mid].size))
+ left = mid + 1;
+ else {
+ /* addr is within the region, so pfn is valid */
+ return pfn;
+ }
+ } while (left < right);
+
+ if (right == type->cnt)
+ return -1UL;
+ else
+ return PHYS_PFN(type->regions[right].base);
+}
+EXPORT_SYMBOL(memblock_next_valid_pfn);
+#endif /* CONFIG_HAVE_ARCH_PFN_VALID */
#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c19f5ac..8a92df7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5452,6 +5452,15 @@ void __ref build_all_zonelists(pg_data_t *pgdat)
* up by free_all_bootmem() once the early boot process is
* done. Non-atomic initialization, single-pass.
*/
+#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_HAVE_ARCH_PFN_VALID)
+extern unsigned long memblock_next_valid_pfn(unsigned long pfn);
+#define skip_to_last_invalid_pfn(pfn) (memblock_next_valid_pfn(pfn) - 1)
+#endif
+
+#ifndef skip_to_last_invalid_pfn
+#define skip_to_last_invalid_pfn(pfn) (pfn)
+#endif
+
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn, enum memmap_context context,
struct vmem_altmap *altmap)
@@ -5483,8 +5492,10 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
if (context != MEMMAP_EARLY)
goto not_early;
- if (!early_pfn_valid(pfn))
+ if (!early_pfn_valid(pfn)) {
+ pfn = skip_to_last_invalid_pfn(pfn);
continue;
+ }
if (!early_pfn_in_nid(pfn, nid))
continue;
if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
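
The -1 in skip_to_last_invalid_pfn() is deliberate: the for loop in
memmap_init_zone() still executes pfn++, so parking pfn one short of
the next valid pfn makes the increment land exactly on it. A toy
userspace demonstration of that contract, where valid_pfn() and
toy_next_valid_pfn() are hypothetical stand-ins for early_pfn_valid()
and memblock_next_valid_pfn():

#include <stdio.h>

#define MAX_PFN 16UL

/* hypothetical validity map: pfns 5..9 form a hole */
static int valid_pfn(unsigned long pfn)
{
	return pfn < 5 || pfn >= 10;
}

/* stand-in for memblock_next_valid_pfn(): first valid pfn after pfn */
static unsigned long toy_next_valid_pfn(unsigned long pfn)
{
	do {
		pfn++;
	} while (pfn < MAX_PFN && !valid_pfn(pfn));
	return pfn;
}

/* same shape as the macro added to mm/page_alloc.c above */
#define skip_to_last_invalid_pfn(pfn) (toy_next_valid_pfn(pfn) - 1)

int main(void)
{
	unsigned long pfn;

	/* prints pfns 0..4 and 10..15; 5..9 are skipped in one step */
	for (pfn = 0; pfn < MAX_PFN; pfn++) {
		if (!valid_pfn(pfn)) {
			pfn = skip_to_last_invalid_pfn(pfn);
			continue;
		}
		printf("init pfn %lu\n", pfn);
	}
	return 0;
}

Hence the name: when a hole is hit, the loop variable is moved to the
last invalid pfn of the hole and the loop increment advances it to the
first valid one.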
--
2.7.4