lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <5ae5eeb4bd12d5aa95a88590594139887257276e.1454094692.git.chengyihetaipei@gmail.com>
Date:	Sat, 30 Jan 2016 03:24:56 +0800
From:	ChengYi He <chengyihetaipei@...il.com>
To:	Andrew Morton <akpm@...ux-foundation.org>
Cc:	Mel Gorman <mgorman@...hsingularity.net>,
	Michal Hocko <mhocko@...e.com>,
	Vlastimil Babka <vbabka@...e.cz>,
	David Rientjes <rientjes@...gle.com>,
	Joonsoo Kim <js1304@...il.com>,
	Yaowei Bai <bywxiaobai@....com>,
	Xishi Qiu <qiuxishi@...wei.com>,
	Alexander Duyck <alexander.h.duyck@...hat.com>,
	"'Kirill A . Shutemov'" <kirill.shutemov@...ux.intel.com>,
	Johannes Weiner <hannes@...xchg.org>, linux-mm@...ck.org,
	linux-kernel@...r.kernel.org, chengyihetaipei@...il.com
Subject: [RFC PATCH 1/2] mm/page_alloc: let migration fallback support pages
 of requested order

This helper function only factors out the code flow within each order
during fallback. There is no functional change.

Signed-off-by: ChengYi He <chengyihetaipei@...il.com>
---
 mm/page_alloc.c | 79 +++++++++++++++++++++++++++++++++------------------------
 1 file changed, 46 insertions(+), 33 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 63358d9..50c325a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1752,51 +1752,64 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
 	}
 }
 
-/* Remove an element from the buddy allocator from the fallback list */
 static inline struct page *
-__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
+__rmqueue_fallback_order(struct zone *zone, unsigned int order,
+		int start_migratetype, int current_order)
 {
 	struct free_area *area;
-	unsigned int current_order;
 	struct page *page;
 	int fallback_mt;
 	bool can_steal;
 
-	/* Find the largest possible block of pages in the other list */
-	for (current_order = MAX_ORDER-1;
-				current_order >= order && current_order <= MAX_ORDER-1;
-				--current_order) {
-		area = &(zone->free_area[current_order]);
-		fallback_mt = find_suitable_fallback(area, current_order,
-				start_migratetype, false, &can_steal);
-		if (fallback_mt == -1)
-			continue;
+	area = &(zone->free_area[current_order]);
+	fallback_mt = find_suitable_fallback(area, current_order,
+			start_migratetype, false, &can_steal);
+	if (fallback_mt == -1)
+		return NULL;
 
-		page = list_first_entry(&area->free_list[fallback_mt],
-						struct page, lru);
-		if (can_steal)
-			steal_suitable_fallback(zone, page, start_migratetype);
+	page = list_first_entry(&area->free_list[fallback_mt],
+					struct page, lru);
+	if (can_steal)
+		steal_suitable_fallback(zone, page, start_migratetype);
 
-		/* Remove the page from the freelists */
-		area->nr_free--;
-		list_del(&page->lru);
-		rmv_page_order(page);
+	/* Remove the page from the freelists */
+	area->nr_free--;
+	list_del(&page->lru);
+	rmv_page_order(page);
 
-		expand(zone, page, order, current_order, area,
-					start_migratetype);
-		/*
-		 * The pcppage_migratetype may differ from pageblock's
-		 * migratetype depending on the decisions in
-		 * find_suitable_fallback(). This is OK as long as it does not
-		 * differ for MIGRATE_CMA pageblocks. Those can be used as
-		 * fallback only via special __rmqueue_cma_fallback() function
-		 */
-		set_pcppage_migratetype(page, start_migratetype);
+	expand(zone, page, order, current_order, area,
+				start_migratetype);
+	/*
+	 * The pcppage_migratetype may differ from pageblock's
+	 * migratetype depending on the decisions in
+	 * find_suitable_fallback(). This is OK as long as it does not
+	 * differ for MIGRATE_CMA pageblocks. Those can be used as
+	 * fallback only via special __rmqueue_cma_fallback() function
+	 */
+	set_pcppage_migratetype(page, start_migratetype);
 
-		trace_mm_page_alloc_extfrag(page, order, current_order,
-			start_migratetype, fallback_mt);
+	trace_mm_page_alloc_extfrag(page, order, current_order,
+		start_migratetype, fallback_mt);
 
-		return page;
+	return page;
+}
+
+/* Remove an element from the buddy allocator from the fallback list */
+static inline struct page *
+__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
+{
+	unsigned int current_order;
+	struct page *page;
+
+	/* Find the largest possible block of pages in the other list */
+	for (current_order = MAX_ORDER-1;
+				current_order >= order && current_order <= MAX_ORDER-1;
+				--current_order) {
+		page = __rmqueue_fallback_order(zone, order, start_migratetype,
+				current_order);
+
+		if (page)
+			return page;
 	}
 
 	return NULL;
-- 
1.9.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ