Message-ID: <1663730246-11968-1-git-send-email-zhaoyang.huang@unisoc.com>
Date:   Wed, 21 Sep 2022 11:17:26 +0800
From:   "zhaoyang.huang" <zhaoyang.huang@...soc.com>
To:     Andrew Morton <akpm@...ux-foundation.org>,
        Catalin Marinas <catalin.marinas@....com>,
        Matthew Wilcox <willy@...radead.org>,
        Zhaoyang Huang <huangzhaoyang@...il.com>, <linux-mm@...ck.org>,
        <linux-kernel@...r.kernel.org>, <ke.wang@...soc.com>,
        <steve.kang@...soc.com>
Subject: [PATCH v2] mm: introduce NR_BAD_PAGES and track them via kmemleak

From: Zhaoyang Huang <zhaoyang.huang@...soc.com>

Bad pages can be introduced by stale extra references held on sub-pages of
high-order allocations or on compound tail pages. Such references prevent
the pages from going back to the allocator, leaving them orphaned. Account
these pages under a new NR_BAD_PAGES vmstat counter and register them with
kmemleak so they can be tracked.
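
For illustration only (hypothetical code, not part of this patch), one way
such a bad page can arise: a driver leaves a stray reference on a sub-page
of a non-compound high-order allocation. When the block is freed,
free_pages_prepare() finds the nonzero refcount on that sub-page and
rejects the block; with this patch the rejected pages are accounted in
NR_BAD_PAGES and handed to kmemleak instead of disappearing silently.

	/* Hypothetical driver bug, for illustration only. */
	static void bad_page_example(void)
	{
		/*
		 * order-2 block without __GFP_COMP: sub-pages are
		 * individually refcounted, tails start at refcount 0.
		 */
		struct page *page = alloc_pages(GFP_KERNEL, 2);

		if (!page)
			return;

		/* stray reference on a sub-page, never dropped */
		page_ref_inc(page + 1);

		/* free_pages_prepare() flags page + 1: nonzero refcount */
		__free_pages(page, 2);
	}

With the patch applied, such pages should show up as nr_bad_pages in
/proc/vmstat and, being otherwise unreferenced, in kmemleak scan reports.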

Signed-off-by: Zhaoyang Huang <zhaoyang.huang@...soc.com>
---
Changes in v2: add accounting for bad pages
---
 include/linux/mmzone.h |  1 +
 mm/page_alloc.c        | 13 ++++++++++---
 mm/vmstat.c            |  1 +
 3 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e24b40c..11c1422 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -166,6 +166,7 @@ enum zone_stat_item {
 	NR_ZSPAGES,		/* allocated in zsmalloc */
 #endif
 	NR_FREE_CMA_PAGES,
+	NR_BAD_PAGES,
 	NR_VM_ZONE_STAT_ITEMS };
 
 enum node_stat_item {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e5486d4..a3768c96 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1408,7 +1408,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 			__memcg_kmem_uncharge_page(page, order);
 		reset_page_owner(page, order);
 		page_table_check_free(page, order);
-		return false;
+		goto err;
 	}
 
 	/*
@@ -1442,7 +1442,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	if (check_free)
 		bad += check_free_page(page);
 	if (bad)
-		return false;
+		goto err;
 
 	page_cpupid_reset_last(page);
 	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
@@ -1486,6 +1486,11 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	debug_pagealloc_unmap_pages(page, 1 << order);
 
 	return true;
+err:
+	__mod_zone_page_state(page_zone(page), NR_BAD_PAGES, 1 << order);
+	kmemleak_alloc(page_address(page), PAGE_SIZE << order, 1, GFP_KERNEL);
+	return false;
+
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -1587,8 +1592,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			count -= nr_pages;
 			pcp->count -= nr_pages;
 
-			if (bulkfree_pcp_prepare(page))
+			if (bulkfree_pcp_prepare(page)) {
+				__mod_zone_page_state(page_zone(page), NR_BAD_PAGES, 1 << order);
 				continue;
+			}
 
 			/* MIGRATE_ISOLATE page should not go to pcplists */
 			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 90af9a8..d391352 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1193,6 +1193,7 @@ int fragmentation_index(struct zone *zone, unsigned int order)
 	"nr_zspages",
 #endif
 	"nr_free_cma",
+	"nr_bad_pages",
 
 	/* enum numa_stat_item counters */
 #ifdef CONFIG_NUMA
-- 
1.9.1
