Message-ID: <20200316125450.GG3486@MiWiFi-R3L-srv>
Date: Mon, 16 Mar 2020 20:54:50 +0800
From: Baoquan He <bhe@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, mhocko@...e.com, akpm@...ux-foundation.org,
david@...hat.com, willy@...radead.org, richard.weiyang@...il.com,
vbabka@...e.cz
Subject: [PATCH v5 1/2] mm/sparse.c: Use kvmalloc/kvfree to alloc/free memmap
for the classic sparse
Replace the open-coded alloc_pages()/vmalloc() fallback pair with
kvmalloc()/kvfree(): kvmalloc() tries a physically contiguous allocation
first and falls back to vmalloc() on failure, while kvfree() releases
either kind of allocation. This makes populate_section_memmap() and
depopulate_section_memmap() much simpler.
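For reference, a simplified sketch of the fallback behaviour that
kvmalloc()/kvfree() provide (illustration only, with made-up *_sketch
names; the real mm/util.c implementation also adjusts the gfp flags and
offers a kvmalloc_node() variant):

	/* needs <linux/slab.h>, <linux/vmalloc.h>, <linux/mm.h> */
	static void *kvmalloc_sketch(size_t size, gfp_t flags)
	{
		/* Try a physically contiguous allocation first ... */
		void *p = kmalloc(size, flags | __GFP_NOWARN | __GFP_NORETRY);

		if (p)
			return p;

		/*
		 * ... and fall back to a vmalloc() mapping otherwise
		 * (the real code propagates the caller's gfp flags here).
		 */
		return vmalloc(size);
	}

	static void kvfree_sketch(const void *addr)
	{
		/* kvfree() picks the matching release path. */
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}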
Suggested-by: Michal Hocko <mhocko@...nel.org>
Signed-off-by: Baoquan He <bhe@...hat.com>
Reviewed-by: David Hildenbrand <david@...hat.com>
Acked-by: Michal Hocko <mhocko@...e.com>
Reviewed-by: Pankaj Gupta <pankaj.gupta.linux@...il.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
mm/sparse.c | 27 +++------------------------
1 file changed, 3 insertions(+), 24 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index e747a238a860..3fa407d7f70a 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -719,35 +719,14 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
 struct page * __meminit populate_section_memmap(unsigned long pfn,
 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
-	struct page *page, *ret;
-	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
-
-	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
-	if (page)
-		goto got_map_page;
-
-	ret = vmalloc(memmap_size);
-	if (ret)
-		goto got_map_ptr;
-
-	return NULL;
-got_map_page:
-	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
-got_map_ptr:
-
-	return ret;
+	return kvmalloc(array_size(sizeof(struct page),
+			PAGES_PER_SECTION), GFP_KERNEL);
 }
 
 static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
 		struct vmem_altmap *altmap)
 {
-	struct page *memmap = pfn_to_page(pfn);
-
-	if (is_vmalloc_addr(memmap))
-		vfree(memmap);
-	else
-		free_pages((unsigned long)memmap,
-			get_order(sizeof(struct page) * PAGES_PER_SECTION));
+	kvfree(pfn_to_page(pfn));
 }
 
 static void free_map_bootmem(struct page *memmap)
--
2.17.2