Message-ID: <20220330102534.1053240-3-chenwandun@huawei.com>
Date: Wed, 30 Mar 2022 18:25:34 +0800
From: Chen Wandun <chenwandun@...wei.com>
To: <linux-mm@...ck.org>, <linux-kernel@...r.kernel.org>,
<akpm@...ux-foundation.org>, <willy@...radead.org>
Subject: [PATCH v2 2/2] mm: fix contiguous memmap assumptions about alloc/free pages
Only FLATMEM and SPARSEMEM_VMEMMAP configurations guarantee that the
page structs backing a compound page are virtually contiguous; with
classic SPARSEMEM the memmap is allocated section by section, so plain
pointer arithmetic across a compound page is not safe. Use nth_page()
to iterate over the tail pages instead.
Signed-off-by: Chen Wandun <chenwandun@...wei.com>
---
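For reference, the conversion relies on nth_page() from
include/linux/mm.h, which as of v5.17 is roughly:

/* include/linux/mm.h (approximate, v5.17) */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page, n)	pfn_to_page(page_to_pfn((page)) + (n))
#else
#define nth_page(page, n)	((page) + (n))
#endif

On classic SPARSEMEM it detours through the pfn, so stepping to the
n-th page stays correct even where the memmap is not virtually
contiguous; on every other memory model it compiles down to the same
"page + n" arithmetic this patch replaces, so the change costs nothing
there.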
mm/page_alloc.c | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 855211dea13e..758d8f069b32 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -721,7 +721,7 @@ static void prep_compound_head(struct page *page, unsigned int order)
 
 static void prep_compound_tail(struct page *head, int tail_idx)
 {
-        struct page *p = head + tail_idx;
+        struct page *p = nth_page(head, tail_idx);
 
         p->mapping = TAIL_MAPPING;
         set_compound_head(p, head);
@@ -1199,10 +1199,10 @@ static inline int check_free_page(struct page *page)
         return 1;
 }
 
-static int free_tail_pages_check(struct page *head_page, struct page *page)
+static int free_tail_pages_check(struct page *head_page, int index)
 {
+        struct page *page = nth_page(head_page, index);
         int ret = 1;
-
         /*
          * We rely page->lru.next never has bit 0 set, unless the page
          * is PageTail(). Let's make sure that's true even for poisoned ->lru.
@@ -1213,7 +1213,7 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
                 ret = 0;
                 goto out;
         }
-        switch (page - head_page) {
+        switch (index) {
         case 1:
                 /* the first tail page: ->mapping may be compound_mapcount() */
                 if (unlikely(compound_mapcount(page))) {
@@ -1322,6 +1322,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
         if (unlikely(order)) {
                 bool compound = PageCompound(page);
                 int i;
+                struct page *tail_page;
 
                 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
 
@@ -1330,13 +1331,14 @@ static __always_inline bool free_pages_prepare(struct page *page,
                         ClearPageHasHWPoisoned(page);
                 }
                 for (i = 1; i < (1 << order); i++) {
+                        tail_page = nth_page(page, i);
                         if (compound)
-                                bad += free_tail_pages_check(page, page + i);
-                        if (unlikely(check_free_page(page + i))) {
+                                bad += free_tail_pages_check(page, i);
+                        if (unlikely(check_free_page(tail_page))) {
                                 bad++;
                                 continue;
                         }
-                        (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+                        tail_page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
                 }
         }
         if (PageMappingFlags(page))
--
2.18.0.huawei.25