Date:   Mon, 18 May 2020 10:20:56 +0900
From:   js1304@...il.com
To:     Andrew Morton <akpm@...ux-foundation.org>
Cc:     linux-mm@...ck.org, linux-kernel@...r.kernel.org,
        kernel-team@....com, Vlastimil Babka <vbabka@...e.cz>,
        Christoph Hellwig <hch@...radead.org>,
        Roman Gushchin <guro@...com>,
        Mike Kravetz <mike.kravetz@...cle.com>,
        Naoya Horiguchi <n-horiguchi@...jp.nec.com>,
        Michal Hocko <mhocko@...e.com>,
        Joonsoo Kim <iamjoonsoo.kim@....com>
Subject: [PATCH 10/11] mm/mempolicy: use standard migration target allocation function

From: Joonsoo Kim <iamjoonsoo.kim@....com>

There is no reason for mempolicy to implement its own callback for
migration target allocation. Use the standard one,
alloc_migration_target(), instead.
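
For reference, a minimal sketch of what the unified allocator plausibly
does for the cases handled here, reconstructed from the callback removed
by this patch and the alloc_control fields the callers now fill in. The
name alloc_migration_target_sketch() is illustrative only; the real
alloc_migration_target() in this series may differ in detail.

/*
 * Illustrative sketch only: dispatch on page type the way the removed
 * alloc_new_node_page() did, but drive nid/gfp/thisnode entirely from
 * the alloc_control the caller sets up.
 */
static struct page *alloc_migration_target_sketch(struct page *page,
						  struct alloc_control *ac)
{
	gfp_t gfp_mask = ac->gfp_mask;

	if (PageHuge(page)) {
		struct hstate *h = page_hstate(page);

		/* hugetlb has its own allocator; ac carries nid/thisnode */
		return alloc_huge_page_nodemask(h, ac);
	}

	if (PageTransHuge(page)) {
		struct page *thp;

		gfp_mask = GFP_TRANSHUGE;
		if (ac->thisnode)
			gfp_mask |= __GFP_THISNODE;
		thp = alloc_pages_node(ac->nid, gfp_mask, HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	}

	/* order-0 case, as in the removed callback */
	if (ac->thisnode)
		gfp_mask |= __GFP_THISNODE;
	return __alloc_pages_node(ac->nid, gfp_mask, 0);
}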

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@....com>
---
 mm/internal.h  |  3 ---
 mm/mempolicy.c | 33 ++++-----------------------------
 mm/migrate.c   |  4 +++-
 3 files changed, 7 insertions(+), 33 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index abe94a7..5ade079 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -612,9 +612,6 @@ static inline bool is_migrate_highatomic_page(struct page *page)
 }
 
 void setup_zone_pageset(struct zone *zone);
-struct alloc_control;
-extern struct page *alloc_new_node_page(struct page *page,
-				struct alloc_control *ac);
 
 struct alloc_control {
 	int nid;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 7241621..8d3ccab 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1065,33 +1065,6 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist,
 	return 0;
 }
 
-/* page allocation callback for NUMA node migration */
-struct page *alloc_new_node_page(struct page *page, struct alloc_control *__ac)
-{
-	if (PageHuge(page)) {
-		struct hstate *h = page_hstate(page);
-		struct alloc_control ac = {
-			.nid = __ac->nid,
-			.nmask = NULL,
-			.thisnode = true,
-		};
-
-		return alloc_huge_page_nodemask(h, &ac);
-	} else if (PageTransHuge(page)) {
-		struct page *thp;
-
-		thp = alloc_pages_node(__ac->nid,
-			(GFP_TRANSHUGE | __GFP_THISNODE),
-			HPAGE_PMD_ORDER);
-		if (!thp)
-			return NULL;
-		prep_transhuge_page(thp);
-		return thp;
-	} else
-		return __alloc_pages_node(__ac->nid, GFP_HIGHUSER_MOVABLE |
-						    __GFP_THISNODE, 0);
-}
-
 /*
  * Migrate pages from one node to a target node.
  * Returns error or the number of pages not migrated.
@@ -1104,6 +1077,8 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 	int err = 0;
 	struct alloc_control ac = {
 		.nid = dest,
+		.gfp_mask = GFP_HIGHUSER_MOVABLE,
+		.thisnode = true,
 	};
 
 	nodes_clear(nmask);
@@ -1119,8 +1094,8 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
 	if (!list_empty(&pagelist)) {
-		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, &ac,
-					MIGRATE_SYNC, MR_SYSCALL);
+		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
+					&ac, MIGRATE_SYNC, MR_SYSCALL);
 		if (err)
 			putback_movable_pages(&pagelist);
 	}
diff --git a/mm/migrate.c b/mm/migrate.c
index 029af0b..3dfb108 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1574,9 +1574,11 @@ static int do_move_pages_to_node(struct mm_struct *mm,
 	int err;
 	struct alloc_control ac = {
 		.nid = node,
+		.gfp_mask = GFP_HIGHUSER_MOVABLE,
+		.thisnode = true,
 	};
 
-	err = migrate_pages(pagelist, alloc_new_node_page, NULL, &ac,
+	err = migrate_pages(pagelist, alloc_migration_target, NULL, &ac,
 			MIGRATE_SYNC, MR_SYSCALL);
 	if (err)
 		putback_movable_pages(pagelist);
-- 
2.7.4
