Date:   Thu, 30 May 2019 14:54:04 -0700
From:   Alexander Duyck <alexander.duyck@...il.com>
To:     nitesh@...hat.com, kvm@...r.kernel.org, david@...hat.com,
        mst@...hat.com, dave.hansen@...el.com,
        linux-kernel@...r.kernel.org, linux-mm@...ck.org
Cc:     yang.zhang.wz@...il.com, pagupta@...hat.com, riel@...riel.com,
        konrad.wilk@...cle.com, lcapitulino@...hat.com,
        wei.w.wang@...el.com, aarcange@...hat.com, pbonzini@...hat.com,
        dan.j.williams@...el.com, alexander.h.duyck@...ux.intel.com
Subject: [RFC PATCH 04/11] mm: Split nr_free into nr_free_raw and
 nr_free_treated

From: Alexander Duyck <alexander.h.duyck@...ux.intel.com>

Split the nr_free value into two counters that track how pages were
inserted into the free list. The idea is that we can later use this to
distinguish the pages that were treated and added to the free list from
the raw pages that were simply added to the head of the list.
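
As a rough sketch of where this split is headed (the helper below is
hypothetical and not part of this patch; it assumes a __SetPageTreated()
counterpart to the __ResetPageTreated() used in the hunks below), a
treatment pass would simply shift a page's accounting from the raw
bucket to the treated bucket, leaving the sum of the two counters equal
to what nr_free used to report:

static inline void account_page_treated(struct page *page,
					struct free_area *area)
{
	/* Hypothetical helper, shown for illustration only. */
	if (PageTreated(page))
		return;

	/* Mark the page treated and move its count between buckets. */
	__SetPageTreated(page);
	area->nr_free_raw--;
	area->nr_free_treated++;
}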

Signed-off-by: Alexander Duyck <alexander.h.duyck@...ux.intel.com>
---
 include/linux/mmzone.h |   36 ++++++++++++++++++++++++++++++++----
 mm/compaction.c        |    4 ++--
 mm/page_alloc.c        |   14 +++++++++-----
 mm/vmstat.c            |    5 +++--
 4 files changed, 46 insertions(+), 13 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0263d5bf0b84..988c3094b686 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -89,7 +89,8 @@ static inline bool is_migrate_movable(int mt)
 
 struct free_area {
 	struct list_head	free_list[MIGRATE_TYPES];
-	unsigned long		nr_free;
+	unsigned long		nr_free_raw;
+	unsigned long		nr_free_treated;
 };
 
 /* Used for pages not on another list */
@@ -97,7 +98,7 @@ static inline void add_to_free_area(struct page *page, struct free_area *area,
 			     int migratetype)
 {
 	list_add(&page->lru, &area->free_list[migratetype]);
-	area->nr_free++;
+	area->nr_free_raw++;
 }
 
 /* Used for pages not on another list */
@@ -105,13 +106,31 @@ static inline void add_to_free_area_tail(struct page *page, struct free_area *ar
 				  int migratetype)
 {
 	list_add_tail(&page->lru, &area->free_list[migratetype]);
-	area->nr_free++;
+	area->nr_free_raw++;
 }
 
 /* Used for pages which are on another list */
 static inline void move_to_free_area(struct page *page, struct free_area *area,
 			     int migratetype)
 {
+	/*
+	 * Since we are moving the page out of one migrate type and into
+	 * another, the page will be added to the head of the new list.
+	 *
+	 * To avoid creating an island of raw pages floating between two
+	 * sections of treated pages, we should reset the page type and
+	 * just re-treat the page when we process the destination.
+	 *
+	 * No need to trigger a notification for this since the page itself
+	 * is actually treated and we are just doing this for logistical
+	 * reasons.
+	 */
+	if (PageTreated(page)) {
+		__ResetPageTreated(page);
+		area->nr_free_treated--;
+		area->nr_free_raw++;
+	}
+
 	list_move(&page->lru, &area->free_list[migratetype]);
 }
 
@@ -125,11 +144,15 @@ static inline struct page *get_page_from_free_area(struct free_area *area,
 static inline void del_page_from_free_area(struct page *page,
 		struct free_area *area)
 {
+	if (PageTreated(page))
+		area->nr_free_treated--;
+	else
+		area->nr_free_raw--;
+
 	list_del(&page->lru);
 	__ClearPageBuddy(page);
 	__ResetPageTreated(page);
 	set_page_private(page, 0);
-	area->nr_free--;
 }
 
 static inline bool free_area_empty(struct free_area *area, int migratetype)
@@ -137,6 +160,11 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
 	return list_empty(&area->free_list[migratetype]);
 }
 
+static inline unsigned long nr_pages_in_free_area(struct free_area *area)
+{
+	return area->nr_free_raw + area->nr_free_treated;
+}
+
 struct pglist_data;
 
 /*
diff --git a/mm/compaction.c b/mm/compaction.c
index 9febc8cc84e7..f5a27d5dccdf 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1318,7 +1318,7 @@ static int next_search_order(struct compact_control *cc, int order)
 		unsigned long flags;
 		unsigned int order_scanned = 0;
 
-		if (!area->nr_free)
+		if (!nr_pages_in_free_area(area))
 			continue;
 
 		spin_lock_irqsave(&cc->zone->lock, flags);
@@ -1674,7 +1674,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 		unsigned long flags;
 		struct page *freepage;
 
-		if (!area->nr_free)
+		if (!nr_pages_in_free_area(area))
 			continue;
 
 		spin_lock_irqsave(&cc->zone->lock, flags);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2894990862bd..10eaea762627 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2418,7 +2418,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
 	int i;
 	int fallback_mt;
 
-	if (area->nr_free == 0)
+	if (!nr_pages_in_free_area(area))
 		return -1;
 
 	*can_steal = false;
@@ -3393,7 +3393,7 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 		struct free_area *area = &z->free_area[o];
 		int mt;
 
-		if (!area->nr_free)
+		if (!nr_pages_in_free_area(area))
 			continue;
 
 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
@@ -5325,7 +5325,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 			struct free_area *area = &zone->free_area[order];
 			int type;
 
-			nr[order] = area->nr_free;
+			nr[order] = nr_pages_in_free_area(area);
 			total += nr[order] << order;
 
 			types[order] = 0;
@@ -5944,9 +5944,13 @@ void __ref memmap_init_zone_device(struct zone *zone,
 static void __meminit zone_init_free_lists(struct zone *zone)
 {
 	unsigned int order, t;
-	for_each_migratetype_order(order, t) {
+
+	for_each_migratetype_order(order, t)
 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
-		zone->free_area[order].nr_free = 0;
+
+	for (order = MAX_ORDER; order--; ) {
+		zone->free_area[order].nr_free_raw = 0;
+		zone->free_area[order].nr_free_treated = 0;
 	}
 }
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index fd7e16ca6996..aa822fda4250 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1031,7 +1031,7 @@ static void fill_contig_page_info(struct zone *zone,
 		unsigned long blocks;
 
 		/* Count number of free blocks */
-		blocks = zone->free_area[order].nr_free;
+		blocks = nr_pages_in_free_area(&zone->free_area[order]);
 		info->free_blocks_total += blocks;
 
 		/* Count free base pages */
@@ -1353,7 +1353,8 @@ static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
 
 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
 	for (order = 0; order < MAX_ORDER; ++order)
-		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
+		seq_printf(m, "%6lu ",
+			   nr_pages_in_free_area(&zone->free_area[order]));
 	seq_putc(m, '\n');
 }
 
