Message-ID: <20180731144545.fh5syvwcecgvqul6@xakep.localdomain>
Date: Tue, 31 Jul 2018 10:45:45 -0400
From: Pavel Tatashin <pasha.tatashin@...cle.com>
To: Oscar Salvador <osalvador@...hadventures.net>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Michal Hocko <mhocko@...e.com>,
Vlastimil Babka <vbabka@...e.cz>,
kirill.shutemov@...ux.intel.com, iamjoonsoo.kim@....com,
Mel Gorman <mgorman@...e.de>,
Souptick Joarder <jrdr.linux@...il.com>,
Linux Memory Management List <linux-mm@...ck.org>,
LKML <linux-kernel@...r.kernel.org>, osalvador@...e.de
Subject: Re: [PATCH] mm: make __paginginit based on CONFIG_MEMORY_HOTPLUG
On 18-07-31 16:41:57, Oscar Salvador wrote:
> On Tue, Jul 31, 2018 at 08:49:11AM -0400, Pavel Tatashin wrote:
> > Hi Oscar,
> >
> > Have you looked into replacing __paginginit via __meminit ? What is
> > the reason to keep both?
> Hi Pavel,
>
> Actually, thinking a bit more about this, it might make sense to remove
> __paginginit altogether and keep only __meminit.
> Looking at the original commit, I think it was introduced as an abstraction.
>
> After the patchset [1] has been applied, only two functions marked __paginginit
> remain, so replacing them with __meminit will be less hassle.
>
> I will send a v2 tomorrow to be applied on top of [1].
>
> [1] https://patchwork.kernel.org/patch/10548861/
>
> Thanks
> --
> Oscar Salvador
> SUSE L3
>
Here is what the patch would look like:
From e640b32dbd329bba5a785cc60050d5d7e1ca18ce Mon Sep 17 00:00:00 2001
From: Pavel Tatashin <pasha.tatashin@...cle.com>
Date: Tue, 31 Jul 2018 10:37:44 -0400
Subject: [PATCH] mm: remove __paginginit
__paginginit is the same as __meminit, except on platforms without
sparsemem, where it is defined as __init.
Remove __paginginit and use __meminit instead. Use __ref in the one
function that mixes __meminit and __init sections: setup_usemap().
Signed-off-by: Pavel Tatashin <pasha.tatashin@...cle.com>
---
mm/internal.h | 12 ------------
mm/page_alloc.c | 19 ++++++++++---------
2 files changed, 10 insertions(+), 21 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 33c22754d282..87256ae1bef8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -389,18 +389,6 @@ static inline struct page *mem_map_next(struct page *iter,
return iter + 1;
}
-/*
- * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
- * so all functions starting at paging_init should be marked __init
- * in those cases. SPARSEMEM, however, allows for memory hotplug,
- * and alloc_bootmem_node is not used.
- */
-#ifdef CONFIG_SPARSEMEM
-#define __paginginit __meminit
-#else
-#define __paginginit __init
-#endif
-
/* Memory initialisation debug and verification */
enum mminit_level {
MMINIT_WARNING,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 02e4b84038f8..92abe3eb151d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6122,7 +6122,7 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l
return usemapsize / 8;
}
-static void __init setup_usemap(struct pglist_data *pgdat,
+static void __ref setup_usemap(struct pglist_data *pgdat,
struct zone *zone,
unsigned long zone_start_pfn,
unsigned long zonesize)
@@ -6142,7 +6142,7 @@ static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
-void __paginginit set_pageblock_order(void)
+void __meminit set_pageblock_order(void)
{
unsigned int order;
@@ -6170,14 +6170,14 @@ void __paginginit set_pageblock_order(void)
* include/linux/pageblock-flags.h for the values of pageblock_order based on
* the kernel config
*/
-void __paginginit set_pageblock_order(void)
+void __meminit set_pageblock_order(void)
{
}
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
-static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
- unsigned long present_pages)
+static unsigned long __meminit calc_memmap_size(unsigned long spanned_pages,
+ unsigned long present_pages)
{
unsigned long pages = spanned_pages;
@@ -6204,7 +6204,7 @@ static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
*
* NOTE: pgdat should get zeroed by caller.
*/
-static void __paginginit free_area_init_core(struct pglist_data *pgdat)
+static void __meminit free_area_init_core(struct pglist_data *pgdat)
{
enum zone_type j;
int nid = pgdat->node_id;
@@ -6344,8 +6344,9 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
-void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
- unsigned long node_start_pfn, unsigned long *zholes_size)
+void __meminit free_area_init_node(int nid, unsigned long *zones_size,
+ unsigned long node_start_pfn,
+ unsigned long *zholes_size)
{
pg_data_t *pgdat = NODE_DATA(nid);
unsigned long start_pfn = 0;
@@ -6390,7 +6391,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
* may be accessed (for example page_to_pfn() on some configuration accesses
* flags). We must explicitly zero those struct pages.
*/
-void __paginginit zero_resv_unavail(void)
+void __meminit zero_resv_unavail(void)
{
phys_addr_t start, end;
unsigned long pfn;
--
2.18.0
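As a side note, here is a minimal sketch (not part of the patch itself) of the
section-annotation chain the commit message describes, i.e. why setup_usemap()
becomes __ref: a __meminit function needs to call an __init helper, and routing
that call through a __ref function keeps modpost quiet. The example_* names
below are hypothetical stand-ins for usemap_size(), setup_usemap() and
free_area_init_core().

#include <linux/init.h>		/* __init, __meminit, __ref */

/* Boot-only helper: placed in .init.text and discarded after boot. */
static unsigned long __init example_usemap_size(unsigned long zonesize)
{
	return zonesize / 8;
}

/*
 * The one function that intentionally mixes sections: it is called from
 * __meminit code but itself calls an __init helper.  Marking it __ref
 * (.ref.text) tells modpost that this reference to an init section is
 * deliberate.
 */
static void __ref example_setup_usemap(unsigned long zonesize)
{
	unsigned long usemapsize = example_usemap_size(zonesize);

	(void)usemapsize;	/* allocation elided in this sketch */
}

/*
 * Kept around for memory hotplug (.meminit.text).  Calling the __init
 * helper directly from here would trigger a modpost section-mismatch
 * warning; going through the __ref wrapper does not.
 */
static void __meminit example_free_area_init_core(unsigned long zonesize)
{
	example_setup_usemap(zonesize);
}

If I read page_alloc.c right, the __ref is safe here because the non-stub
setup_usemap() only exists on !SPARSEMEM configurations, where memory hotplug
is unavailable and the function therefore only ever runs during boot, before
its __init callees are freed.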