Message-Id: <20170726083333.17754-6-mhocko@kernel.org>
Date: Wed, 26 Jul 2017 10:33:33 +0200
From: Michal Hocko <mhocko@...nel.org>
To: linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Mel Gorman <mgorman@...e.de>, Vlastimil Babka <vbabka@...e.cz>,
Andrea Arcangeli <aarcange@...hat.com>,
Jerome Glisse <jglisse@...hat.com>,
Reza Arbab <arbab@...ux.vnet.ibm.com>,
Yasuaki Ishimatsu <yasu.isimatu@...il.com>,
qiuxishi@...wei.com, Kani Toshimitsu <toshi.kani@....com>,
slaoub@...il.com, Joonsoo Kim <js1304@...il.com>,
Andi Kleen <ak@...ux.intel.com>,
Daniel Kiper <daniel.kiper@...cle.com>,
Igor Mammedov <imammedo@...hat.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
LKML <linux-kernel@...r.kernel.org>,
Michal Hocko <mhocko@...e.com>
Subject: [RFC PATCH 5/5] mm, sparse: rename kmalloc_section_memmap, __kfree_section_memmap

From: Michal Hocko <mhocko@...e.com>

Both functions will use the altmap rather than kmalloc for
sparsemem-vmemmap, so rename them to
alloc_section_memmap/free_section_memmap, which better reflects their
functionality.
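
As an illustration (abbreviated from the diff below), the alloc/free
pair in sparse_add_one_section now reads symmetrically:

	memmap = alloc_section_memmap(section_nr, pgdat->node_id, altmap);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		free_section_memmap(memmap);
		return -ENOMEM;
	}
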
Signed-off-by: Michal Hocko <mhocko@...e.com>
---
 mm/sparse.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/mm/sparse.c b/mm/sparse.c
index 8460ecb9e69b..eb2e45b35a28 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -667,13 +667,13 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+static inline struct page *alloc_section_memmap(unsigned long pnum, int nid,
 		struct vmem_altmap *altmap)
 {
 	/* This will make the necessary allocations eventually. */
 	return __sparse_mem_map_populate(pnum, nid, altmap);
 }
-static void __kfree_section_memmap(struct page *memmap)
+static void free_section_memmap(struct page *memmap)
 {
 	unsigned long start = (unsigned long)memmap;
 	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
@@ -711,13 +711,13 @@ static struct page *__kmalloc_section_memmap(void)
 	return ret;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+static inline struct page *alloc_section_memmap(unsigned long pnum, int nid,
 		struct vmem_altmap *altmap)
 {
 	return __kmalloc_section_memmap();
 }
 
-static void __kfree_section_memmap(struct page *memmap)
+static void free_section_memmap(struct page *memmap)
 {
 	if (is_vmalloc_addr(memmap))
 		vfree(memmap);
@@ -781,12 +781,12 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long st
 	ret = sparse_index_init(section_nr, pgdat->node_id);
 	if (ret < 0 && ret != -EEXIST)
 		return ret;
-	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
+	memmap = alloc_section_memmap(section_nr, pgdat->node_id, altmap);
 	if (!memmap)
 		return -ENOMEM;
 	usemap = __kmalloc_section_usemap();
 	if (!usemap) {
-		__kfree_section_memmap(memmap);
+		free_section_memmap(memmap);
 		return -ENOMEM;
 	}
 
@@ -820,7 +820,7 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long st
 	pgdat_resize_unlock(pgdat, &flags);
 	if (ret <= 0) {
 		kfree(usemap);
-		__kfree_section_memmap(memmap);
+		free_section_memmap(memmap);
 	}
 	return ret;
 }
@@ -861,7 +861,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
 		kfree(usemap);
 		if (memmap)
-			__kfree_section_memmap(memmap);
+			free_section_memmap(memmap);
 		return;
 	}
 
--
2.13.2