Message-Id: <1468376653-26561-1-git-send-email-minchan@kernel.org>
Date:	Wed, 13 Jul 2016 11:24:13 +0900
From:	Minchan Kim <minchan@...nel.org>
To:	Andrew Morton <akpm@...ux-foundation.org>
Cc:	Mel Gorman <mgorman@...e.de>, linux-kernel@...r.kernel.org,
	linux-mm@...ck.org, Minchan Kim <minchan@...nel.org>
Subject: [PATCH] mm: fix pgalloc_stall on unpopulated zone

If we use sc->reclaim_idx for accounting pgstall, it can increase
the count on an unpopulated zone, for example, the movable zone
(which my system doesn't even have) when the allocation request is
GFP_HIGHUSER_MOVABLE. That makes no sense.

This patch fixes it by accounting the stall on the first populated
zone at or below the request's highest_zoneidx.
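
For illustration only (not part of this patch), the intended accounting
target can be expressed as a small helper; first_populated_zone() is a
hypothetical name, and the walk simply mirrors the description above:

static struct zone *first_populated_zone(struct pglist_data *pgdat,
					 enum zone_type highest_zoneidx)
{
	int zid;

	/* Walk down from the request's highest usable zone index. */
	for (zid = highest_zoneidx; zid >= 0; zid--) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (populated_zone(zone))
			return zone;
	}

	/* No populated zone at or below the index. */
	return NULL;
}

In the patch itself the same effect comes from passing
zonelist_zone_idx(ac->preferred_zoneref) into try_to_free_pages() and
using it as sc->reclaim_idx instead of gfp_zone(gfp_mask).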

Signed-off-by: Minchan Kim <minchan@...nel.org>
---
 fs/buffer.c          | 2 +-
 include/linux/swap.h | 3 ++-
 mm/page_alloc.c      | 3 ++-
 mm/vmscan.c          | 5 +++--
 4 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 46b3568..69841f4 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -268,7 +268,7 @@ static void free_more_memory(void)
 						gfp_zone(GFP_NOFS), NULL);
 		if (z->zone)
 			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
-						GFP_NOFS, NULL);
+					GFP_NOFS, NULL, gfp_zone(GFP_NOFS));
 	}
 }
 
diff --git a/include/linux/swap.h b/include/linux/swap.h
index cc753c6..935f7e1 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -309,7 +309,8 @@ extern void lru_cache_add_active_or_unevictable(struct page *page,
 /* linux/mm/vmscan.c */
 extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-					gfp_t gfp_mask, nodemask_t *mask);
+					gfp_t gfp_mask, nodemask_t *mask,
+					enum zone_type classzone_idx);
 extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 						  unsigned long nr_pages,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 80c9b9a..5f20d4b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3305,7 +3305,8 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
 	current->reclaim_state = &reclaim_state;
 
 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
-								ac->nodemask);
+				ac->nodemask,
+				zonelist_zone_idx(ac->preferred_zoneref));
 
 	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c538a8c..1f91e2e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2855,13 +2855,14 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-				gfp_t gfp_mask, nodemask_t *nodemask)
+				gfp_t gfp_mask, nodemask_t *nodemask,
+				enum zone_type classzone_idx)
 {
 	unsigned long nr_reclaimed;
 	struct scan_control sc = {
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
 		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
-		.reclaim_idx = gfp_zone(gfp_mask),
+		.reclaim_idx = classzone_idx,
 		.order = order,
 		.nodemask = nodemask,
 		.priority = DEF_PRIORITY,
-- 
1.9.1
