Message-Id: <1461758476-450-3-git-send-email-vbabka@suse.cz>
Date: Wed, 27 Apr 2016 14:01:16 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Mel Gorman <mgorman@...hsingularity.net>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Jesper Dangaard Brouer <brouer@...hat.com>,
Vlastimil Babka <vbabka@...e.cz>
Subject: [PATCH 3/3] mm, page_alloc: don't duplicate code in free_pcp_prepare
The new free_pcp_prepare() function shares a lot of code with
free_pages_prepare(), which makes this a maintenance risk when some future
patch modifies only one of them. We should be able to achieve the same effect
(skipping free_pages_check() in !DEBUG_VM configs) by adding a bool parameter
to free_pages_prepare() and marking it __always_inline, so that the checks
(and the order != 0 parts) are eliminated at the free_pcp_prepare() call site.
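
As a rough standalone sketch of the pattern (illustrative names only, not
code from this patch): an __always_inline helper taking a compile-time
constant bool lets the compiler drop the unwanted branch at the fast-path
call site.

	#include <stdbool.h>
	#include <stdio.h>

	#define __always_inline inline __attribute__((__always_inline__))

	static bool expensive_check(int val)
	{
		return val < 0;	/* stands in for free_pages_check() */
	}

	static __always_inline bool prepare(int val, bool check)
	{
		if (check && expensive_check(val))
			return false;
		/* common preparation work shared by both callers */
		return true;
	}

	/* debug-style caller: keeps the check */
	static bool prepare_checked(int val)
	{
		return prepare(val, true);
	}

	/*
	 * fast-path caller: 'check' is constant false, so the compiler
	 * eliminates the expensive_check() branch entirely
	 */
	static bool prepare_fast(int val)
	{
		return prepare(val, false);
	}

	int main(void)
	{
		printf("%d %d\n", prepare_checked(-1), prepare_fast(-1));
		return 0;
	}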
!DEBUG_VM: bloat-o-meter reports no difference, as my gcc was already inlining
free_pages_prepare() and the elimination seems to work as expected.
DEBUG_VM bloat-o-meter:
add/remove: 0/1 grow/shrink: 2/0 up/down: 1035/-778 (257)
function                                     old     new   delta
__free_pages_ok                              297    1060    +763
free_hot_cold_page                           480     752    +272
free_pages_prepare                           778       -    -778
Here inlining did not occur before, so forcing it adds some code, but that is
acceptable for a debug option.
Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
---
mm/page_alloc.c | 34 ++++++----------------------------
1 file changed, 6 insertions(+), 28 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 163d08ea43f0..b23f641348ab 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -990,7 +990,8 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
return ret;
}
-static bool free_pages_prepare(struct page *page, unsigned int order)
+static __always_inline bool free_pages_prepare(struct page *page, unsigned int order,
+ bool check_free)
{
int bad = 0;
@@ -1023,7 +1024,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
}
if (PageAnonHead(page))
page->mapping = NULL;
- if (free_pages_check(page)) {
+ if (check_free && free_pages_check(page)) {
bad++;
} else {
page_cpupid_reset_last(page);
@@ -1050,7 +1051,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
#ifdef CONFIG_DEBUG_VM
static inline bool free_pcp_prepare(struct page *page)
{
- return free_pages_prepare(page, 0);
+ return free_pages_prepare(page, 0, true);
}
static inline bool bulkfree_pcp_prepare(struct page *page)
@@ -1060,30 +1061,7 @@ static inline bool bulkfree_pcp_prepare(struct page *page)
#else
static bool free_pcp_prepare(struct page *page)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
-
- trace_mm_page_free(page, 0);
- kmemcheck_free_shadow(page, 0);
- kasan_free_pages(page, 0);
-
- if (PageAnonHead(page))
- page->mapping = NULL;
-
- reset_page_owner(page, 0);
-
- if (!PageHighMem(page)) {
- debug_check_no_locks_freed(page_address(page),
- PAGE_SIZE);
- debug_check_no_obj_freed(page_address(page),
- PAGE_SIZE);
- }
- arch_free_page(page, 0);
- kernel_poison_pages(page, 0, 0);
- kernel_map_pages(page, 0, 0);
-
- page_cpupid_reset_last(page);
- page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
- return true;
+ return free_pages_prepare(page, 0, false);
}
static bool bulkfree_pcp_prepare(struct page *page)
@@ -1260,7 +1238,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
int migratetype;
unsigned long pfn = page_to_pfn(page);
- if (!free_pages_prepare(page, order))
+ if (!free_pages_prepare(page, order, true))
return;
migratetype = get_pfnblock_migratetype(page, pfn);
--
2.8.1