lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <558E0A51.1040807@huawei.com>
Date:	Sat, 27 Jun 2015 10:28:33 +0800
From:	Xishi Qiu <qiuxishi@...wei.com>
To:	Andrew Morton <akpm@...ux-foundation.org>,
	"H. Peter Anvin" <hpa@...or.com>, Ingo Molnar <mingo@...nel.org>,
	"Luck, Tony" <tony.luck@...el.com>,
	Hanjun Guo <guohanjun@...wei.com>,
	Xiexiuqi <xiexiuqi@...wei.com>, <leon@...n.nu>,
	Kamezawa Hiroyuki <kamezawa.hiroyu@...fujitsu.com>,
	Dave Hansen <dave.hansen@...el.com>,
	Naoya Horiguchi <n-horiguchi@...jp.nec.com>,
	Vlastimil Babka <vbabka@...e.cz>, Mel Gorman <mgorman@...e.de>
CC:	Xishi Qiu <qiuxishi@...wei.com>, Linux MM <linux-mm@...ck.org>,
	LKML <linux-kernel@...r.kernel.org>
Subject: [RFC v2 PATCH 8/8] mm: add the PCP interface

Abstract the PCP allocation code into a new helper, __rmqueue_pcp(), and
do not fall back to other migrate types in rmqueue_bulk() when the
requested migratetype is mirror.

Signed-off-by: Xishi Qiu <qiuxishi@...wei.com>
---
 mm/page_alloc.c | 85 +++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 61 insertions(+), 24 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8a6125e..bb44463 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1337,11 +1337,20 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
 			int migratetype, bool cold)
 {
-	int i;
+	int i, mt;
+	struct page *page;
 
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
-		struct page *page = __rmqueue(zone, order, migratetype);
+		/*
+		 * If there is no mirrored memory left, just keep the list
+		 * empty, because we cannot mix pages of other migrate
+		 * types into the mirror list.
+		 */
+		if (is_migrate_mirror(migratetype))
+			page = __rmqueue_smallest(zone, order, migratetype);
+		else
+			page = __rmqueue(zone, order, migratetype);
 		if (unlikely(page == NULL))
 			break;
 
@@ -1359,15 +1368,61 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		else
 			list_add_tail(&page->lru, list);
 		list = &page->lru;
-		if (is_migrate_cma(get_freepage_migratetype(page)))
+
+		mt = get_freepage_migratetype(page);
+		if (is_migrate_cma(mt))
 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
 					      -(1 << order));
+		if (is_migrate_mirror(mt))
+			__mod_zone_page_state(zone, NR_FREE_MIRROR_PAGES,
+					      -(1 << order));
 	}
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
 	spin_unlock(&zone->lock);
 	return i;
 }
 
+/*
+ * Take one order-0 page from the per-cpu free list of @migratetype,
+ * refilling the list from the buddy allocator via rmqueue_bulk() when
+ * it is empty.  Returns the page, or NULL if none is available.
+ *
+ * NOTE(review): caller is expected to hold interrupts disabled (see the
+ * local_irq_save() in buffered_rmqueue()) — confirm for any new caller.
+ */
+static struct page *__rmqueue_pcp(struct zone *zone, unsigned int order,
+				gfp_t gfp_flags, int migratetype)
+{
+	struct page *page;
+	struct per_cpu_pages *pcp;
+	struct list_head *list;
+	bool cold;
+
+	cold = ((gfp_flags & __GFP_COLD) != 0);
+	pcp = &this_cpu_ptr(zone->pageset)->pcp;
+
+retry:
+	list = &pcp->lists[migratetype];
+	if (list_empty(list)) {
+		pcp->count += rmqueue_bulk(zone, 0,
+				pcp->batch, list,
+				migratetype, cold);
+		if (unlikely(list_empty(list))) {
+			/*
+			 * No mirrored memory is left, so retry once with
+			 * the MIGRATE_RECLAIMABLE per-cpu list instead of
+			 * failing the allocation outright.  For any other
+			 * migratetype, rmqueue_bulk() has already tried
+			 * the fallbacks, so give up.
+			 */
+			if (is_migrate_mirror(migratetype)) {
+				migratetype = MIGRATE_RECLAIMABLE;
+				goto retry;
+			} else
+				return NULL;
+		}
+	}
+
+	/* Cold requests take from the list tail, hot ones from the head. */
+	if (cold)
+		page = list_entry(list->prev, struct page, lru);
+	else
+		page = list_entry(list->next, struct page, lru);
+
+	list_del(&page->lru);
+	pcp->count--;
+
+	return page;
+}
+
 #ifdef CONFIG_NUMA
 /*
  * Called from the vmstat counter updater to drain pagesets of this
@@ -1713,30 +1768,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 {
 	unsigned long flags;
 	struct page *page;
-	bool cold = ((gfp_flags & __GFP_COLD) != 0);
 
 	if (likely(order == 0)) {
-		struct per_cpu_pages *pcp;
-		struct list_head *list;
-
 		local_irq_save(flags);
-		pcp = &this_cpu_ptr(zone->pageset)->pcp;
-		list = &pcp->lists[migratetype];
-		if (list_empty(list)) {
-			pcp->count += rmqueue_bulk(zone, 0,
-					pcp->batch, list,
-					migratetype, cold);
-			if (unlikely(list_empty(list)))
-				goto failed;
-		}
-
-		if (cold)
-			page = list_entry(list->prev, struct page, lru);
-		else
-			page = list_entry(list->next, struct page, lru);
-
-		list_del(&page->lru);
-		pcp->count--;
+		page = __rmqueue_pcp(zone, order, gfp_flags, migratetype);
+		if (!page)
+			goto failed;
 	} else {
 		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
 			/*
-- 
2.0.0


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ