Message-Id: <20170801124111.28881-7-mhocko@kernel.org>
Date: Tue, 1 Aug 2017 14:41:11 +0200
From: Michal Hocko <mhocko@...nel.org>
To: linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Mel Gorman <mgorman@...e.de>, Vlastimil Babka <vbabka@...e.cz>,
Andrea Arcangeli <aarcange@...hat.com>,
Jerome Glisse <jglisse@...hat.com>,
Reza Arbab <arbab@...ux.vnet.ibm.com>,
Yasuaki Ishimatsu <yasu.isimatu@...il.com>,
qiuxishi@...wei.com, Kani Toshimitsu <toshi.kani@....com>,
slaoub@...il.com, Joonsoo Kim <js1304@...il.com>,
Andi Kleen <ak@...ux.intel.com>,
Daniel Kiper <daniel.kiper@...cle.com>,
Igor Mammedov <imammedo@...hat.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
LKML <linux-kernel@...r.kernel.org>,
Michal Hocko <mhocko@...e.com>
Subject: [PATCH 6/6] mm, sparse: rename kmalloc_section_memmap, __kfree_section_memmap
From: Michal Hocko <mhocko@...e.com>
Both functions will use the altmap rather than kmalloc for
sparsemem-vmemmap, so rename them to alloc_section_memmap and
free_section_memmap, names which better reflect their functionality.
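To illustrate the direction, here is a minimal userspace sketch of the
altmap-style allocation the new names refer to. It is not the kernel
implementation: the struct fields mirror the kernel's struct vmem_altmap,
but altmap_alloc() and the main() driver are hypothetical stand-ins for
demonstration only.

#include <stdio.h>

/*
 * Simplified stand-in for the kernel's struct vmem_altmap: a
 * pre-reserved pfn range that backs the section memmap instead of
 * an allocation from the slab or page allocator.
 */
struct vmem_altmap {
        unsigned long base_pfn; /* first pfn of the backing range */
        unsigned long reserve;  /* leading pfns that must stay untouched */
        unsigned long free;     /* pfns available for memmap allocations */
        unsigned long alloc;    /* pfns handed out so far */
};

/*
 * Hypothetical helper: carve nr_pfns out of the altmap, or fail.
 * A bump allocation over pre-reserved memory is what "use altmap
 * rather than kmalloc" boils down to.
 */
static long altmap_alloc(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
        if (altmap->free - altmap->alloc < nr_pfns)
                return -1;      /* backing range exhausted */
        altmap->alloc += nr_pfns;
        return (long)(altmap->base_pfn + altmap->reserve +
                      altmap->alloc - nr_pfns);
}

int main(void)
{
        struct vmem_altmap altmap = {
                .base_pfn = 0x100000, .reserve = 2, .free = 64,
        };
        long pfn = altmap_alloc(&altmap, 16);

        if (pfn < 0) {
                fprintf(stderr, "altmap exhausted\n");
                return 1;
        }
        printf("section memmap pfns start at %#lx\n", (unsigned long)pfn);
        return 0;
}

The point of the scheme is that the memmap comes out of a pre-reserved
pfn range (e.g. on device memory) via a simple bump allocator, so
neither kmalloc nor the page allocator is involved.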
Signed-off-by: Michal Hocko <mhocko@...e.com>
---
mm/sparse.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
 
diff --git a/mm/sparse.c b/mm/sparse.c
index 19b9aa60f48a..be1527a37112 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -663,13 +663,13 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
#endif
 
#ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+static inline struct page *alloc_section_memmap(unsigned long pnum, int nid,
struct vmem_altmap *altmap)
{
/* This will make the necessary allocations eventually. */
return __sparse_mem_map_populate(pnum, nid, altmap);
}
-static void __kfree_section_memmap(struct page *memmap)
+static void free_section_memmap(struct page *memmap)
{
unsigned long start = (unsigned long)memmap;
unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
@@ -707,13 +707,13 @@ static struct page *__kmalloc_section_memmap(void)
return ret;
}
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+static inline struct page *alloc_section_memmap(unsigned long pnum, int nid,
struct vmem_altmap *altmap)
{
return __kmalloc_section_memmap();
}
 
-static void __kfree_section_memmap(struct page *memmap)
+static void free_section_memmap(struct page *memmap)
{
if (is_vmalloc_addr(memmap))
vfree(memmap);
@@ -777,12 +777,12 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long st
ret = sparse_index_init(section_nr, pgdat->node_id);
if (ret < 0 && ret != -EEXIST)
return ret;
- memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
+ memmap = alloc_section_memmap(section_nr, pgdat->node_id, altmap);
if (!memmap)
return -ENOMEM;
usemap = __kmalloc_section_usemap();
if (!usemap) {
- __kfree_section_memmap(memmap);
+ free_section_memmap(memmap);
return -ENOMEM;
}
 
@@ -816,7 +816,7 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long st
pgdat_resize_unlock(pgdat, &flags);
if (ret <= 0) {
kfree(usemap);
- __kfree_section_memmap(memmap);
+ free_section_memmap(memmap);
}
return ret;
}
@@ -857,7 +857,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
kfree(usemap);
if (memmap)
- __kfree_section_memmap(memmap);
+ free_section_memmap(memmap);
return;
}
 
--
2.13.2