[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20181214230310.572-9-mgorman@techsingularity.net>
Date: Fri, 14 Dec 2018 23:03:04 +0000
From: Mel Gorman <mgorman@...hsingularity.net>
To: Linux-MM <linux-mm@...ck.org>
Cc: David Rientjes <rientjes@...gle.com>,
Andrea Arcangeli <aarcange@...hat.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Michal Hocko <mhocko@...nel.org>, ying.huang@...el.com,
kirill@...temov.name, Andrew Morton <akpm@...ux-foundation.org>,
Linux List Kernel Mailing <linux-kernel@...r.kernel.org>,
Mel Gorman <mgorman@...hsingularity.net>
Subject: [PATCH 08/14] mm, compaction: Use the page allocator bulk-free helper for lists of pages
release_freepages() is a simpler version of free_unref_page_list() but it
tracks the highest PFN for caching the restart point of the compaction
free scanner. This patch optionally tracks the highest PFN in the core
helper and converts compaction to use it.
Signed-off-by: Mel Gorman <mgorman@...hsingularity.net>
---
include/linux/gfp.h | 7 ++++++-
mm/compaction.c | 12 +++---------
mm/page_alloc.c | 10 +++++++++-
3 files changed, 18 insertions(+), 11 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0705164f928c..ed9a95b63374 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -543,7 +543,12 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_unref_page(struct page *page);
-extern void free_unref_page_list(struct list_head *list);
+extern void __free_page_list(struct list_head *list, bool dropref, unsigned long *highest_pfn);
+
+static inline void free_unref_page_list(struct list_head *list)
+{
+ return __free_page_list(list, false, NULL);
+}
struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
diff --git a/mm/compaction.c b/mm/compaction.c
index 4f51435c645a..8ba9b3b479e3 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -52,16 +52,10 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
static unsigned long release_freepages(struct list_head *freelist)
{
- struct page *page, *next;
- unsigned long high_pfn = 0;
+ unsigned long high_pfn;
- list_for_each_entry_safe(page, next, freelist, lru) {
- unsigned long pfn = page_to_pfn(page);
- list_del(&page->lru);
- __free_page(page);
- if (pfn > high_pfn)
- high_pfn = pfn;
- }
+ __free_page_list(freelist, true, &high_pfn);
+ INIT_LIST_HEAD(freelist);
return high_pfn;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a6e7bfd18cde..80535cd55a92 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2961,18 +2961,26 @@ void free_unref_page(struct page *page)
/*
* Free a list of 0-order pages
*/
-void free_unref_page_list(struct list_head *list)
+void __free_page_list(struct list_head *list, bool dropref,
+ unsigned long *highest_pfn)
{
struct page *page, *next;
unsigned long flags, pfn;
int batch_count = 0;
+ if (highest_pfn)
+ *highest_pfn = 0;
+
/* Prepare pages for freeing */
list_for_each_entry_safe(page, next, list, lru) {
+ if (dropref)
+ WARN_ON_ONCE(!put_page_testzero(page));
pfn = page_to_pfn(page);
if (!free_unref_page_prepare(page, pfn))
list_del(&page->lru);
set_page_private(page, pfn);
+ if (highest_pfn && pfn > *highest_pfn)
+ *highest_pfn = pfn;
}
local_irq_save(flags);
--
2.16.4
Powered by blists - more mailing lists