Message-Id: <20200220043316.19668-5-bhe@redhat.com>
Date: Thu, 20 Feb 2020 12:33:13 +0800
From: Baoquan He <bhe@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, akpm@...ux-foundation.org,
richardw.yang@...ux.intel.com, david@...hat.com, osalvador@...e.de,
dan.j.williams@...el.com, mhocko@...e.com, bhe@...hat.com,
rppt@...ux.ibm.com, robin.murphy@....com
Subject: [PATCH v2 4/7] mm/sparse.c: only use subsection map in VMEMMAP case
Currently, the subsection map is used whenever SPARSEMEM is enabled, in both
the VMEMMAP and !VMEMMAP cases. However, subsection hotplug is not supported
at all in the SPARSEMEM|!VMEMMAP case, so keeping a subsection map there is
unnecessary and misleading. Let's adjust the code so that the subsection map
is only used in the VMEMMAP case.
Signed-off-by: Baoquan He <bhe@...hat.com>
---
 include/linux/mmzone.h |  2 ++
 mm/sparse.c            | 20 ++++++++++++++++++++
2 files changed, 22 insertions(+)
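
Note (illustration only, not part of the patch to apply): below is a minimal,
standalone C sketch of the pattern used here, i.e. the real helper is compiled
only under CONFIG_SPARSEMEM_VMEMMAP while the !VMEMMAP build gets a trivial
stub, so callers such as section_activate()/section_deactivate() need no
#ifdefs. The function name mirrors the kernel's fill_subsection_map(), but the
body is a placeholder for demonstration, not the kernel implementation.

/*
 * Build with -DCONFIG_SPARSEMEM_VMEMMAP to get the "real" variant,
 * without it to get the no-op stub.
 */
#include <stdio.h>

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* VMEMMAP case: the subsection map is actually maintained (placeholder body). */
static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	printf("VMEMMAP: mark subsections for pfn %lu, %lu pages\n",
	       pfn, nr_pages);
	return 0;
}
#else
/* !VMEMMAP case: subsection hotplug is unsupported, so the stub does nothing. */
static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	(void)pfn;
	(void)nr_pages;
	return 0;
}
#endif

int main(void)
{
	/* Callers are identical in both configurations; no #ifdef needed. */
	return fill_subsection_map(0, 2048);
}
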
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 462f6873905a..fc0de3a9a51e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1185,7 +1185,9 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
struct mem_section_usage {
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
+#endif
/* See declaration of similar field in struct zone */
unsigned long pageblock_flags[0];
};
diff --git a/mm/sparse.c b/mm/sparse.c
index df857ee9330c..66c497d6a229 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -209,6 +209,7 @@ static inline unsigned long first_present_section_nr(void)
return next_present_section_nr(-1);
}
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
unsigned long nr_pages)
{
@@ -243,6 +244,11 @@ void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
nr_pages -= pfns;
}
}
+#else
+void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
+{
+}
+#endif
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
@@ -726,6 +732,7 @@ static void free_map_bootmem(struct page *memmap)
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
/**
* clear_subsection_map - Clear subsection map of one memory region
*
@@ -764,6 +771,12 @@ static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
return 1;
}
+#else
+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+ return 0;
+}
+#endif
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
struct vmem_altmap *altmap)
@@ -820,6 +833,7 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
ms->section_mem_map = (unsigned long)NULL;
}
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
/**
* fill_subsection_map - fill subsection map of a memory region
* @pfn - start pfn of the memory range
@@ -854,6 +868,12 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
return rc;
}
+#else
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+ return 0;
+}
+#endif
static struct page * __meminit section_activate(int nid, unsigned long pfn,
unsigned long nr_pages, struct vmem_altmap *altmap)
--
2.17.2