Date:   Fri, 28 Apr 2023 19:00:41 +0800
From:   "zhaoyang.huang" <zhaoyang.huang@...soc.com>
To:     Andrew Morton <akpm@...ux-foundation.org>,
        Roman Gushchin <guro@...com>,
        Roman Gushchin <roman.gushchin@...ux.dev>,
        <linux-mm@...ck.org>, <linux-kernel@...r.kernel.org>,
        Zhaoyang Huang <huangzhaoyang@...il.com>, <ke.wang@...soc.com>
Subject: [PATCH] mm: optimize page allocation when CMA is enabled

From: Zhaoyang Huang <zhaoyang.huang@...soc.com>

Please note the typical scenario below, introduced by commit 168676649:
12MB of free CMA pages 'help' GFP_MOVABLE allocations keep draining and
fragmenting U&R (unmovable and reclaimable) page blocks until those
blocks shrink to 12MB, without ever entering the slowpath, which runs
against the current reclaim policy. This commit changes the criterion
from the hard-coded '1/2' to a watermark check, which keeps U&R free
pages around WMARK_LOW when falling back to CMA.

For example, in the DMA32 zone below, free pages sit just above WMARK_LOW:

DMA32 free:25900kB boost:0kB min:4176kB low:25856kB high:29516kB

Signed-off-by: Zhaoyang Huang <zhaoyang.huang@...soc.com>
---
 mm/page_alloc.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 53 insertions(+), 6 deletions(-)
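For reviewers' convenience, below is a minimal userspace sketch (not part
of the patch) contrasting the old hard-coded '1/2' rule with the new
watermark-based rule, using the DMA32 numbers quoted above. The fake_zone
struct and the cma_first_old()/cma_first_new() predicates are simplified
stand-ins for the kernel's zone fields and zone_watermark_ok(), not the
real API.

#include <stdbool.h>
#include <stdio.h>

struct fake_zone {
	unsigned long free_pages;	/* NR_FREE_PAGES, CMA pages included */
	unsigned long free_cma_pages;	/* NR_FREE_CMA_PAGES */
	unsigned long wmark_low;	/* WMARK_LOW, in pages */
};

/* Old rule: use CMA first once over half of the zone's free memory is CMA. */
static bool cma_first_old(const struct fake_zone *z)
{
	return z->free_cma_pages > z->free_pages / 2;
}

/* New rule: use CMA first once U&R free pages alone sit at or below low. */
static bool cma_first_new(const struct fake_zone *z)
{
	unsigned long ur_free = z->free_pages - z->free_cma_pages;

	return ur_free <= z->wmark_low;
}

int main(void)
{
	/*
	 * DMA32 above, in 4kB pages: free 25900kB -> 6475, low 25856kB ->
	 * 6464, and the 12MB of free CMA -> 3072.
	 */
	struct fake_zone z = { 6475, 3072, 6464 };

	/*
	 * Old: 3072 > 6475 / 2 = 3237 is false, so U&R keeps being drained.
	 * New: 6475 - 3072 = 3403 <= 6464, so CMA is used first.
	 */
	printf("old: %d, new: %d\n", cma_first_old(&z), cma_first_new(&z));
	return 0;
}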

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0745aed..97768fe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3071,6 +3071,53 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 
 }
 
+#ifdef CONFIG_CMA
+static bool __if_use_cma_first(struct zone *zone, unsigned int order,
+			       unsigned int alloc_flags)
+{
+	unsigned long cma_proportion = 0;
+	unsigned long cma_free_proportion = 0;
+	unsigned long watermark = 0;
+	unsigned long free_pages;
+	/* scale factor per watermark level: WMARK_MIN, WMARK_LOW, WMARK_HIGH */
+	unsigned long wm_fact[ALLOC_WMARK_MASK] = {1, 1, 2};
+	long count = 0;
+	bool cma_first = false;
+
+	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
+	/* check if GFP_MOVABLE passed the watermark only with CMA's help */
+	if (!zone_watermark_ok(zone, order, watermark, 0, alloc_flags & (~ALLOC_CMA))) {
+		alloc_flags &= ALLOC_WMARK_MASK;
+		/*
+		 * A failed WMARK_LOW check leads to using CMA first; this
+		 * keeps U&R free pages around WMARK_LOW while they are
+		 * being drained by GFP_MOVABLE allocations.
+		 */
+		if (alloc_flags <= ALLOC_WMARK_LOW) {
+			cma_first = true;
+		} else {
+			/* check the free CMA proportion for WMARK_HIGH */
+			count = atomic_long_read(&zone->managed_pages);
+			cma_proportion = zone->cma_pages * 100 / count;
+			free_pages = zone_page_state(zone, NR_FREE_PAGES);
+			/* guard against division by zero when the zone has no free pages */
+			if (!free_pages)
+				return false;
+			cma_free_proportion = zone_page_state(zone, NR_FREE_CMA_PAGES) * 100
+				/ free_pages;
+			cma_first = (cma_free_proportion >= wm_fact[alloc_flags] * cma_proportion
+					|| cma_free_proportion >= 50);
+		}
+	}
+	return cma_first;
+}
+#else
+static inline bool __if_use_cma_first(struct zone *zone, unsigned int order,
+				      unsigned int alloc_flags)
+{
+	return false;
+}
+#endif
 /*
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
@@ -3087,10 +3134,10 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
-		 * allocating from CMA when over half of the zone's free memory
-		 * is in the CMA area.
+		 * allocating from CMA based on a watermark check instead of
+		 * the hard-coded half of the zone's free memory.
 		 */
-		if (alloc_flags & ALLOC_CMA &&
-		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
-		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
-			page = __rmqueue_cma_fallback(zone, order);
+		if (migratetype == MIGRATE_MOVABLE && alloc_flags & ALLOC_CMA) {
+			bool cma_first = __if_use_cma_first(zone, order, alloc_flags);
+
+			page = cma_first ? __rmqueue_cma_fallback(zone, order) : NULL;
 			if (page)
 				return page;
 		}
-- 
1.9.1
