Message-Id: <20201220082754.6900-4-bhe@redhat.com>
Date: Sun, 20 Dec 2020 16:27:52 +0800
From: Baoquan He <bhe@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, akpm@...ux-foundation.org,
gopakumarr@...are.com, rppt@...nel.org, david@...hat.com,
bhe@...hat.com
Subject: [PATCH v2 3/5] mm: simplify parameter of function memmap_init_zone()
As David suggested, passing 'struct zone *zone' alone is enough: the node id,
zone index, start pfn and span can all be derived from the zone itself.
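For reference, a minimal sketch of how the old parameters are recovered from
the zone pointer (this mirrors the mm/page_alloc.c and ia64 hunks below; the
function name here is only illustrative):

	static void __meminit memmap_init_zone_sketch(struct zone *zone)
	{
		unsigned long size = zone->spanned_pages;	/* old 'size' */
		int nid = zone_to_nid(zone);			/* old 'nid' */
		int zone_id = zone_idx(zone);			/* old 'zone' index */
		unsigned long start_pfn = zone->zone_start_pfn;	/* old 'start_pfn' */

		memmap_init_range(size, nid, zone_id, start_pfn, start_pfn + size,
				  MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
	}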
Suggested-by: David Hildenbrand <david@...hat.com>
Signed-off-by: Baoquan He <bhe@...hat.com>
---
arch/ia64/include/asm/pgtable.h | 3 +--
arch/ia64/mm/init.c | 12 +++++++-----
mm/page_alloc.c | 20 ++++++++++----------
3 files changed, 18 insertions(+), 17 deletions(-)
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index dce2ff37df65..2c81394a2430 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -520,8 +520,7 @@ extern struct page *zero_page_memmap_ptr;
# ifdef CONFIG_VIRTUAL_MEM_MAP
/* arch mem_map init routine is needed due to holes in a virtual mem_map */
- extern void memmap_init_zone(unsigned long size, int nid, unsigned long zone,
- unsigned long start_pfn);
+ extern void memmap_init_zone(struct zone *zone);
# endif /* CONFIG_VIRTUAL_MEM_MAP */
# endif /* !__ASSEMBLY__ */
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index c8e68e92beb3..ccbda1a74c95 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -541,12 +541,14 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
return 0;
}
-void __meminit
-memmap_init_zone(unsigned long size, int nid, unsigned long zone,
- unsigned long start_pfn)
+void __meminit memmap_init_zone(struct zone *zone)
{
+ unsigned long size = zone->spanned_pages;
+ int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
+ unsigned long start_pfn = zone->zone_start_pfn;
+
if (!vmem_map) {
- memmap_init_range(size, nid, zone, start_pfn, start_pfn + size,
+ memmap_init_range(size, nid, zone_id, start_pfn, start_pfn + size,
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
} else {
struct page *start;
@@ -556,7 +558,7 @@ memmap_init_zone(unsigned long size, int nid, unsigned long zone,
args.start = start;
args.end = start + size;
args.nid = nid;
- args.zone = zone;
+ args.zone = zone_id;
efi_memmap_walk(virtual_memmap_init, &args);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4b46326099d9..7a6626351ed7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6292,16 +6292,16 @@ static inline u64 init_unavailable_range(unsigned long spfn, unsigned long epfn,
}
#endif
-void __init __weak memmap_init_zone(unsigned long size, int nid,
- unsigned long zone,
- unsigned long zone_start_pfn)
+void __init __weak memmap_init_zone(struct zone *zone)
{
unsigned long start_pfn, end_pfn, hole_start_pfn = 0;
- unsigned long zone_end_pfn = zone_start_pfn + size;
+ int i, nid = zone_to_nid(zone), zone_id = zone_idx(zone);
+ unsigned long zone_start_pfn = zone->zone_start_pfn;
+ unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
u64 pgcnt = 0;
- int i;
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+ unsigned long size;
start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
hole_start_pfn = clamp(hole_start_pfn, zone_start_pfn,
@@ -6309,13 +6309,13 @@ void __init __weak memmap_init_zone(unsigned long size, int nid,
if (end_pfn > start_pfn) {
size = end_pfn - start_pfn;
- memmap_init_range(size, nid, zone, start_pfn, zone_end_pfn,
+ memmap_init_range(size, nid, zone_id, start_pfn, zone_end_pfn,
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
}
if (hole_start_pfn < start_pfn)
pgcnt += init_unavailable_range(hole_start_pfn,
- start_pfn, zone, nid);
+ start_pfn, zone_id, nid);
hole_start_pfn = end_pfn;
}
@@ -6328,11 +6328,11 @@ void __init __weak memmap_init_zone(unsigned long size, int nid,
*/
if (hole_start_pfn < zone_end_pfn)
pgcnt += init_unavailable_range(hole_start_pfn, zone_end_pfn,
- zone, nid);
+ zone_id, nid);
if (pgcnt)
pr_info("%s: Zeroed struct page in unavailable ranges: %lld\n",
- zone_names[zone], pgcnt);
+ zone_names[zone_id], pgcnt);
}
static int zone_batchsize(struct zone *zone)
@@ -7039,7 +7039,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
set_pageblock_order();
setup_usemap(pgdat, zone, zone_start_pfn, size);
init_currently_empty_zone(zone, zone_start_pfn, size);
- memmap_init_zone(size, nid, j, zone_start_pfn);
+ memmap_init_zone(zone);
}
}
--
2.17.2