Message-Id: <20190215220856.29749-8-zi.yan@sent.com>
Date: Fri, 15 Feb 2019 14:08:32 -0800
From: Zi Yan <zi.yan@...t.com>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc: Dave Hansen <dave.hansen@...ux.intel.com>,
Michal Hocko <mhocko@...nel.org>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Vlastimil Babka <vbabka@...e.cz>,
Mel Gorman <mgorman@...hsingularity.net>,
John Hubbard <jhubbard@...dia.com>,
Mark Hairgrove <mhairgrove@...dia.com>,
Nitin Gupta <nigupta@...dia.com>,
David Nellans <dnellans@...dia.com>, Zi Yan <ziy@...dia.com>
Subject: [RFC PATCH 07/31] mm: deallocate pages with order > MAX_ORDER.
From: Zi Yan <ziy@...dia.com>
When MAX_ORDER is not large enough for the buddy allocator to hand out 1GB pages
but 1GB THPs are still created via in-place promotion, __free_pages_ok() cannot
return such pages to the buddy allocator. Instead, tear down the compound page
with destroy_compound_gigantic_page() and release the physically contiguous
range with free_contig_range(). free_contig_range() is also moved out of the
#ifdef block that guards alloc_contig_range() so it is available for this path.
Signed-off-by: Zi Yan <ziy@...dia.com>
---
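Note (illustration only, not part of the patch): assuming a 4KB base page and
the default MAX_ORDER of 11 (largest buddy block = order 10, i.e. 4MB), a 1GB
THP is an order-18 compound page, which is why it cannot go through
free_one_page(). A minimal userspace sketch of the arithmetic:

/*
 * Illustration only. Assumes a 4KB base page and MAX_ORDER = 11,
 * as on a typical x86_64 configuration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long base_page = 4096UL;	/* 4KB base page */
	unsigned long gigantic  = 1UL << 30;	/* 1GB THP */
	unsigned int order = 0;

	/* find order such that base_page << order == 1GB */
	while ((base_page << order) < gigantic)
		order++;

	/*
	 * Prints 18: far above MAX_ORDER, so the page cannot be freed
	 * via free_one_page() and free_contig_range() is used instead.
	 */
	printf("1GB THP order = %u\n", order);
	return 0;
}
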
mm/page_alloc.c | 36 ++++++++++++++++++++++++++++++------
1 file changed, 30 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9ba2cdc320f2..cfa99bb54bd6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1287,6 +1287,24 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
}
}
+static void destroy_compound_gigantic_page(struct page *page,
+ unsigned int order)
+{
+ int i;
+ int nr_pages = 1 << order;
+ struct page *p = page + 1;
+
+ atomic_set(compound_mapcount_ptr(page), 0);
+ for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+ clear_compound_head(p);
+ set_page_refcounted(p);
+ }
+
+ set_compound_order(page, 0);
+ __ClearPageHead(page);
+ set_page_refcounted(page);
+}
+
static void __free_pages_ok(struct page *page, unsigned int order)
{
unsigned long flags;
@@ -1296,11 +1314,16 @@ static void __free_pages_ok(struct page *page, unsigned int order)
if (!free_pages_prepare(page, order, true))
return;
- migratetype = get_pfnblock_migratetype(page, pfn);
- local_irq_save(flags);
- __count_vm_events(PGFREE, 1 << order);
- free_one_page(page_zone(page), page, pfn, order, migratetype);
- local_irq_restore(flags);
+ if (order > MAX_ORDER) {
+ destroy_compound_gigantic_page(page, order);
+ free_contig_range(page_to_pfn(page), 1 << order);
+ } else {
+ migratetype = get_pfnblock_migratetype(page, pfn);
+ local_irq_save(flags);
+ __count_vm_events(PGFREE, 1 << order);
+ free_one_page(page_zone(page), page, pfn, order, migratetype);
+ local_irq_restore(flags);
+ }
}
static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -8281,6 +8304,8 @@ int alloc_contig_range(unsigned long start, unsigned long end,
return ret;
}
+#endif
+
void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
unsigned int count = 0;
@@ -8293,7 +8318,6 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages)
}
WARN(count != 0, "%d pages are still in use!\n", count);
}
-#endif
#ifdef CONFIG_MEMORY_HOTPLUG
/*
--
2.20.1