Message-ID: <155977187919.2443951.8925592545929008845.stgit@dwillia2-desk3.amr.corp.intel.com>
Date: Wed, 05 Jun 2019 14:57:59 -0700
From: Dan Williams <dan.j.williams@...el.com>
To: akpm@...ux-foundation.org
Cc: Michal Hocko <mhocko@...e.com>, Vlastimil Babka <vbabka@...e.cz>,
Logan Gunthorpe <logang@...tatee.com>,
Oscar Salvador <osalvador@...e.de>,
Pavel Tatashin <pasha.tatashin@...een.com>,
Jane Chu <jane.chu@...cle.com>, linux-mm@...ck.org,
linux-nvdimm@...ts.01.org, linux-kernel@...r.kernel.org,
osalvador@...e.de, mhocko@...e.com
Subject: [PATCH v9 02/12] mm/sparsemem: Add helpers to track active portions
 of a section at boot

Prepare for hot{plug,remove} of sub-ranges of a section by tracking a
sub-section active bitmask, each bit representing a PMD_SIZE span of the
architecture's memory hotplug section size.
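
For concreteness, the geometry this implies on x86_64 (4K pages, 128M
sections, 2M PMDs) works out as below. This is an illustrative sketch,
not a quote of the kernel headers, though the names follow the
SUBSECTION_* helpers defined earlier in this series:

/*
 * Illustrative geometry, assuming x86_64 defaults: a section covers
 * 2^27 bytes (128M) and a subsection covers PMD_SIZE, 2^21 bytes (2M),
 * so each section carries a 64-bit subsection_map.
 */
#define SECTION_SIZE_BITS       27
#define SUBSECTION_SHIFT        21
#define PAGE_SHIFT              12

#define PAGES_PER_SECTION       (1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))       /* 32768 */
#define PAGES_PER_SUBSECTION    (1UL << (SUBSECTION_SHIFT - PAGE_SHIFT))        /* 512 */
#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT)) /* 64 */

_Static_assert(SUBSECTIONS_PER_SECTION == 64, "one unsigned long per section");
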
The implication of a partially populated section is that pfn_valid()
needs to go beyond a valid_section() check and also read the
sub-section active ranges from the bitmask. The expectation is that
the bitmask (subsection_map) fits in the same cacheline as the
valid_section() data, so the incremental performance overhead to
pfn_valid() should be negligible.
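
To make the index math concrete, here is a self-contained userspace
mirror of the subsection_map_index() helper added below; the constants
assume the x86_64 geometry sketched above and are illustrative only:

#include <assert.h>

#define PAGES_PER_SECTION       32768UL         /* x86_64: 128M / 4K */
#define PAGES_PER_SUBSECTION    512UL           /* x86_64: 2M / 4K */
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION - 1))

/* userspace mirror of the kernel helper, for illustration only */
static int subsection_map_index(unsigned long pfn)
{
        return (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION;
}

int main(void)
{
        /* pfn 0x48a00 is 0xa00 (2560) pages into section 9: 2560 / 512 == 5 */
        assert(subsection_map_index(0x48a00) == 5);
        return 0;
}
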
Cc: Michal Hocko <mhocko@...e.com>
Cc: Vlastimil Babka <vbabka@...e.cz>
Cc: Logan Gunthorpe <logang@...tatee.com>
Cc: Oscar Salvador <osalvador@...e.de>
Cc: Pavel Tatashin <pasha.tatashin@...een.com>
Tested-by: Jane Chu <jane.chu@...cle.com>
Signed-off-by: Dan Williams <dan.j.williams@...el.com>
---
 include/linux/mmzone.h |   29 ++++++++++++++++++++++++++++-
 mm/page_alloc.c        |    4 +++-
 mm/sparse.c            |   35 +++++++++++++++++++++++++++++++++++
 3 files changed, 66 insertions(+), 2 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ac163f2f274f..6dd52d544857 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1199,6 +1199,8 @@ struct mem_section_usage {
         unsigned long pageblock_flags[0];
 };

+void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
+
 struct page;
 struct page_ext;
 struct mem_section {
@@ -1336,12 +1338,36 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)

 extern int __highest_present_section_nr;

+static inline int subsection_map_index(unsigned long pfn)
+{
+        return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
+}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+{
+        int idx = subsection_map_index(pfn);
+
+        return test_bit(idx, ms->usage->subsection_map);
+}
+#else
+static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+{
+        return 1;
+}
+#endif
+
 #ifndef CONFIG_HAVE_ARCH_PFN_VALID
 static inline int pfn_valid(unsigned long pfn)
 {
+        struct mem_section *ms;
+
         if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                 return 0;
-        return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
+        ms = __nr_to_section(pfn_to_section_nr(pfn));
+        if (!valid_section(ms))
+                return 0;
+        return pfn_section_valid(ms, pfn);
 }
 #endif

@@ -1373,6 +1399,7 @@ void sparse_init(void);
 #define sparse_init()  do {} while (0)
 #define sparse_index_init(_sec, _nid)  do {} while (0)
 #define pfn_present pfn_valid
+#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
 #endif /* CONFIG_SPARSEMEM */

 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c6d8224d792e..bd773efe5b82 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7292,10 +7292,12 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)

         /* Print out the early node map */
         pr_info("Early memory node ranges\n");
-        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
+        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
                 pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
                         (u64)start_pfn << PAGE_SHIFT,
                         ((u64)end_pfn << PAGE_SHIFT) - 1);
+                subsection_map_init(start_pfn, end_pfn - start_pfn);
+        }

         /* Initialise every node */
         mminit_verify_pageflags_layout();
diff --git a/mm/sparse.c b/mm/sparse.c
index 71da15cc7432..0baa2e55cfdd 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -210,6 +210,41 @@ static inline unsigned long first_present_section_nr(void)
         return next_present_section_nr(-1);
 }

+void subsection_mask_set(unsigned long *map, unsigned long pfn,
+                unsigned long nr_pages)
+{
+        int idx = subsection_map_index(pfn);
+        int end = subsection_map_index(pfn + nr_pages - 1);
+
+        bitmap_set(map, idx, end - idx + 1);
+}
+
+void subsection_map_init(unsigned long pfn, unsigned long nr_pages)
+{
+        int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
+        int i, start_sec = pfn_to_section_nr(pfn);
+
+        if (!nr_pages)
+                return;
+
+        for (i = start_sec; i <= end_sec; i++) {
+                struct mem_section *ms;
+                unsigned long pfns;
+
+                pfns = min(nr_pages, PAGES_PER_SECTION
+                                - (pfn & ~PAGE_SECTION_MASK));
+                ms = __nr_to_section(i);
+                subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
+
+                pr_debug("%s: sec: %d pfns: %ld set(%d, %d)\n", __func__, i,
+                                pfns, subsection_map_index(pfn),
+                                subsection_map_index(pfn + pfns - 1));
+
+                pfn += pfns;
+                nr_pages -= pfns;
+        }
+}
+
 /* Record a memory area against a node. */
 void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
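
For readers who want to poke at the bit math without a kernel tree, a
minimal userspace sketch of subsection_mask_set() follows; it assumes
the same x86_64 constants as above and open-codes bitmap_set() as a
simple loop rather than using the kernel helper:

#include <stdio.h>

#define PAGES_PER_SECTION       32768UL
#define PAGES_PER_SUBSECTION    512UL
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION - 1))

static int subsection_map_index(unsigned long pfn)
{
        return (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION;
}

/* open-coded stand-in for bitmap_set(map, idx, end - idx + 1) */
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
                unsigned long nr_pages)
{
        int idx = subsection_map_index(pfn);
        int end = subsection_map_index(pfn + nr_pages - 1);

        for (; idx <= end; idx++)
                *map |= 1UL << idx;
}

int main(void)
{
        unsigned long map = 0;

        /* 1000 pages starting mid-subsection straddle subsections 0-2 */
        subsection_mask_set(&map, 256, 1000);
        printf("subsection_map: %#lx\n", map);  /* prints 0x7 */
        return 0;
}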