Message-Id: <20180228030308.1116-5-pasha.tatashin@oracle.com>
Date: Tue, 27 Feb 2018 22:03:06 -0500
From: Pavel Tatashin <pasha.tatashin@...cle.com>
To: steven.sistare@...cle.com, daniel.m.jordan@...cle.com,
akpm@...ux-foundation.org, mgorman@...hsingularity.net,
mhocko@...e.com, linux-mm@...ck.org, linux-kernel@...r.kernel.org,
gregkh@...uxfoundation.org, vbabka@...e.cz,
bharata@...ux.vnet.ibm.com, tglx@...utronix.de, mingo@...hat.com,
hpa@...or.com, x86@...nel.org, dan.j.williams@...el.com,
kirill.shutemov@...ux.intel.com, bhe@...hat.com
Subject: [v5 4/6] mm/memory_hotplug: optimize probe routine
When memory is hotplugged, pages_correctly_reserved() is called to verify
that the added memory is present. This routine traverses every struct page
and verifies that PageReserved() is set, which is a slow operation,
especially when a large amount of memory is added: on x86, for example, a
single 128M section already holds 32768 struct pages to examine.
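For reference, the loop being removed boils down to this (a condensed
sketch of the deleted code in the diff below):

	for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
		struct page *page = pfn_to_page(pfn);

		/* One flag test per page: e.g. 32768 tests per 128M section */
		for (j = 0; j < PAGES_PER_SECTION; j++)
			if (!PageReserved(page + j))
				return false;	/* page not reserved: bail out */
	}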
Instead of checking every page, it is enough to check each section: that it
is present, that it has a mapping (i.e. its struct page array is allocated),
and that it is not already online.
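In condensed form, the replacement check below performs three O(1)
section-level tests per section (a sketch; the real code keeps the tests
separate so each failure mode gets its own warning):

	for (; section_nr < section_nr_end; section_nr++, pfn += PAGES_PER_SECTION) {
		if (!present_section_nr(section_nr) ||	/* section not added */
		    !valid_section_nr(section_nr) ||	/* no memmap allocated */
		    online_section_nr(section_nr))	/* already online */
			return false;
	}

This reads only the section metadata and never dereferences a struct page.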
In addition, we should not expect the probe routine to set flags in struct
page, as the struct pages have not been initialized yet. That initialization
should be done in __init_single_page(), the same as during boot.
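To illustrate, onlining later initializes the struct pages roughly like this
(a simplified sketch, not the exact kernel signatures; zone and nid here
stand for the zone index and node ID the range is onlined into):

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		__init_single_page(pfn_to_page(pfn), pfn, zone, nid);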
Signed-off-by: Pavel Tatashin <pasha.tatashin@...cle.com>
Reviewed-by: Ingo Molnar <mingo@...nel.org>
Acked-by: Michal Hocko <mhocko@...e.com>
---
drivers/base/memory.c | 36 ++++++++++++++++++++----------------
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index fe4b24f05f6a..deb3f029b451 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -187,13 +187,14 @@ int memory_isolate_notify(unsigned long val, void *v)
 }
 
 /*
- * The probe routines leave the pages reserved, just as the bootmem code does.
- * Make sure they're still that way.
+ * The probe routines leave the pages uninitialized, just as the bootmem code
+ * does. Make sure we do not access them, but instead use only information from
+ * within sections.
  */
-static bool pages_correctly_reserved(unsigned long start_pfn)
+static bool pages_correctly_probed(unsigned long start_pfn)
 {
-	int i, j;
-	struct page *page;
+	unsigned long section_nr = pfn_to_section_nr(start_pfn);
+	unsigned long section_nr_end = section_nr + sections_per_block;
 	unsigned long pfn = start_pfn;
 
 	/*
@@ -201,21 +202,24 @@ static bool pages_correctly_reserved(unsigned long start_pfn)
 	 * SPARSEMEM_VMEMMAP. We lookup the page once per section
 	 * and assume memmap is contiguous within each section
 	 */
-	for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
+	for (; section_nr < section_nr_end; section_nr++) {
 		if (WARN_ON_ONCE(!pfn_valid(pfn)))
 			return false;
 
-		page = pfn_to_page(pfn);
-
-		for (j = 0; j < PAGES_PER_SECTION; j++) {
-			if (PageReserved(page + j))
-				continue;
-
-			printk(KERN_WARNING "section number %ld page number %d "
-				"not reserved, was it already online?\n",
-				pfn_to_section_nr(pfn), j);
+		if (!present_section_nr(section_nr)) {
+			pr_warn("section %ld pfn[%lx, %lx) not present",
+				section_nr, pfn, pfn + PAGES_PER_SECTION);
+			return false;
+		} else if (!valid_section_nr(section_nr)) {
+			pr_warn("section %ld pfn[%lx, %lx) no valid memmap",
+				section_nr, pfn, pfn + PAGES_PER_SECTION);
+			return false;
+		} else if (online_section_nr(section_nr)) {
+			pr_warn("section %ld pfn[%lx, %lx) is already online",
+				section_nr, pfn, pfn + PAGES_PER_SECTION);
 			return false;
 		}
+		pfn += PAGES_PER_SECTION;
 	}
 
 	return true;
@@ -237,7 +241,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
 
 	switch (action) {
 	case MEM_ONLINE:
-		if (!pages_correctly_reserved(start_pfn))
+		if (!pages_correctly_probed(start_pfn))
 			return -EBUSY;
 
 		ret = online_pages(start_pfn, nr_pages, online_type);
--
2.16.2