Date:   Tue, 29 Mar 2022 21:09:28 +0800
From:   Chen Wandun <chenwandun@...wei.com>
To:     <linux-mm@...ck.org>, <linux-kernel@...r.kernel.org>,
        <akpm@...ux-foundation.org>
Subject: [PATCH 2/2] mm: fix contiguous memmap assumptions about alloc/free pages

The page structs of a compound page are only guaranteed to be virtually
contiguous when CONFIG_SPARSEMEM is disabled or CONFIG_SPARSEMEM_VMEMMAP is
enabled, so use nth_page() to iterate over each page instead of plain
pointer arithmetic.
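As a sketch of the intended pattern (illustration only, not part of the
patch; walk_compound_tails() is a hypothetical helper), iterating the tail
pages of a compound page without assuming a virtually contiguous memmap
looks roughly like this:

	/*
	 * Hypothetical helper, for illustration only: visit every tail
	 * page of a compound page of the given order.  On SPARSEMEM
	 * without SPARSEMEM_VMEMMAP the memmap is allocated per section,
	 * so the tail struct pages may not be virtually contiguous with
	 * the head; nth_page() resolves through the pfn there and is
	 * plain pointer arithmetic on all other configs.  page_nth() is
	 * the inverse mapping, recovering the tail index from a
	 * head/tail pair.
	 */
	static void walk_compound_tails(struct page *head, unsigned int order)
	{
		int i;

		for (i = 1; i < (1 << order); i++) {
			struct page *tail = nth_page(head, i);

			VM_BUG_ON_PAGE(page_nth(head, tail) != i, tail);
			tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}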

Signed-off-by: Chen Wandun <chenwandun@...wei.com>
---
 include/linux/mm.h |  2 ++
 mm/page_alloc.c    | 12 +++++++-----
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 355075fb2654..ef48cfef7c67 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -212,9 +212,11 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
 
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
+#define page_nth(head, tail)	(page_to_pfn(tail) - page_to_pfn(head))
 #define folio_page_idx(folio, p)	(page_to_pfn(p) - folio_pfn(folio))
 #else
 #define nth_page(page,n) ((page) + (n))
+#define page_nth(head, tail)	((tail) - (head))
 #define folio_page_idx(folio, p)	((p) - &(folio)->page)
 #endif
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 855211dea13e..09bc63992d20 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -721,7 +721,7 @@ static void prep_compound_head(struct page *page, unsigned int order)
 
 static void prep_compound_tail(struct page *head, int tail_idx)
 {
-	struct page *p = head + tail_idx;
+	struct page *p = nth_page(head, tail_idx);
 
 	p->mapping = TAIL_MAPPING;
 	set_compound_head(p, head);
@@ -1213,7 +1213,7 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 		ret = 0;
 		goto out;
 	}
-	switch (page - head_page) {
+	switch (page_nth(head_page, page)) {
 	case 1:
 		/* the first tail page: ->mapping may be compound_mapcount() */
 		if (unlikely(compound_mapcount(page))) {
@@ -1322,6 +1322,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	if (unlikely(order)) {
 		bool compound = PageCompound(page);
 		int i;
+		struct page *tail_page;
 
 		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
 
@@ -1330,13 +1331,14 @@ static __always_inline bool free_pages_prepare(struct page *page,
 			ClearPageHasHWPoisoned(page);
 		}
 		for (i = 1; i < (1 << order); i++) {
+			tail_page = nth_page(page, i);
 			if (compound)
-				bad += free_tail_pages_check(page, page + i);
-			if (unlikely(check_free_page(page + i))) {
+				bad += free_tail_pages_check(page, tail_page);
+			if (unlikely(check_free_page(tail_page))) {
 				bad++;
 				continue;
 			}
-			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+			tail_page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 		}
 	}
 	if (PageMappingFlags(page))
-- 
2.18.0.huawei.25
