Message-Id: <1594622517-20681-7-git-send-email-iamjoonsoo.kim@lge.com>
Date: Mon, 13 Jul 2020 15:41:54 +0900
From: js1304@...il.com
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
kernel-team@....com, Vlastimil Babka <vbabka@...e.cz>,
Christoph Hellwig <hch@...radead.org>,
Roman Gushchin <guro@...com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Naoya Horiguchi <n-horiguchi@...jp.nec.com>,
Michal Hocko <mhocko@...e.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>
Subject: [PATCH v5 6/9] mm/mempolicy: use a standard migration target allocation callback
From: Joonsoo Kim <iamjoonsoo.kim@....com>
There is now a well-defined migration target allocation callback, alloc_migration_target(). Use it in migrate_to_node() and do_move_pages_to_node() instead of the open-coded alloc_new_node_page(), and remove that now-unused helper.
Acked-by: Michal Hocko <mhocko@...e.com>
Acked-by: Vlastimil Babka <vbabka@...e.cz>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@....com>
---
mm/internal.h | 1 -
mm/mempolicy.c | 31 ++++++-------------------------
mm/migrate.c | 8 ++++++--
3 files changed, 12 insertions(+), 28 deletions(-)
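For context, both call sites converted below follow the same pattern: the target node and gfp mask are carried in a stack-allocated struct migration_target_control whose address is passed through the existing 'private' argument of migrate_pages(). The following is a minimal sketch of that pattern, not part of the patch; the helper name migrate_list_to_node() is invented for illustration, and the "internal.h" include assumes struct migration_target_control is declared there, as the mm/internal.h hunk below suggests.

/*
 * Illustration only -- not part of this patch. A hypothetical helper
 * showing the calling convention both converted sites use: placement
 * policy travels in a stack-allocated migration_target_control whose
 * address is passed through migrate_pages()'s 'private' word.
 */
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/migrate.h>	/* migrate_pages(), MIGRATE_SYNC, MR_SYSCALL */
#include "internal.h"		/* migration_target_control, alloc_migration_target() (assumed visible here) */

static int migrate_list_to_node(struct list_head *pagelist, int node)
{
	struct migration_target_control mtc = {
		.nid = node,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};
	int err;

	if (list_empty(pagelist))
		return 0;

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
	if (err)
		putback_movable_pages(pagelist);

	return err;
}

Reusing the 'private' word this way keeps migrate_pages()'s signature unchanged while letting every migration user describe its target placement with the same structure.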
diff --git a/mm/internal.h b/mm/internal.h
index 0beacf3..10c6776 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -613,7 +613,6 @@ static inline bool is_migrate_highatomic_page(struct page *page)
 }
 
 void setup_zone_pageset(struct zone *zone);
-extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
 
 struct migration_target_control {
 	int nid;			/* preferred node id */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9034a53..93fcfc1 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1065,29 +1065,6 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist,
 	return 0;
 }
 
-/* page allocation callback for NUMA node migration */
-struct page *alloc_new_node_page(struct page *page, unsigned long node)
-{
-	if (PageHuge(page)) {
-		struct hstate *h = page_hstate(compound_head(page));
-		gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
-
-		return alloc_huge_page_nodemask(h, node, NULL, gfp_mask);
-	} else if (PageTransHuge(page)) {
-		struct page *thp;
-
-		thp = alloc_pages_node(node,
-			(GFP_TRANSHUGE | __GFP_THISNODE),
-			HPAGE_PMD_ORDER);
-		if (!thp)
-			return NULL;
-		prep_transhuge_page(thp);
-		return thp;
-	} else
-		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
-						    __GFP_THISNODE, 0);
-}
-
 /*
  * Migrate pages from one node to a target node.
  * Returns error or the number of pages not migrated.
@@ -1098,6 +1075,10 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 	nodemask_t nmask;
 	LIST_HEAD(pagelist);
 	int err = 0;
+	struct migration_target_control mtc = {
+		.nid = dest,
+		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
+	};
 
 	nodes_clear(nmask);
 	node_set(source, nmask);
@@ -1112,8 +1093,8 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
 	if (!list_empty(&pagelist)) {
-		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
-					MIGRATE_SYNC, MR_SYSCALL);
+		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
+				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
 		if (err)
 			putback_movable_pages(&pagelist);
 	}
diff --git a/mm/migrate.c b/mm/migrate.c
index c35ba2a..1a891c4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1591,9 +1591,13 @@ static int do_move_pages_to_node(struct mm_struct *mm,
 		struct list_head *pagelist, int node)
 {
 	int err;
+	struct migration_target_control mtc = {
+		.nid = node,
+		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
+	};
 
-	err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
-			MIGRATE_SYNC, MR_SYSCALL);
+	err = migrate_pages(pagelist, alloc_migration_target, NULL,
+			(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
 	if (err)
 		putback_movable_pages(pagelist);
 	return err;
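For readers who do not have the earlier patches of this series at hand, the alloc_new_node_page() removed above shows what the standard callback consolidates. The sketch below is a reconstruction for illustration only, not the actual alloc_migration_target() introduced earlier in the series: the function name is invented, and apart from the nid and gfp_mask fields visible in this patch, nothing about struct migration_target_control is assumed.

/*
 * Illustration only -- NOT the real alloc_migration_target(). A rough
 * reconstruction, from the removed alloc_new_node_page() above, of the
 * per-page-type logic a standard migration target callback consolidates.
 * It matches the new_page_t signature used by migrate_pages() and would
 * live in mm/migrate.c, which already pulls in the hugetlb/THP helpers.
 */
static struct page *sketch_migration_target(struct page *page,
						unsigned long private)
{
	struct migration_target_control *mtc;

	mtc = (struct migration_target_control *)private;

	/*
	 * The removed code hard-coded the hugetlb/THP masks; the real
	 * callback presumably derives them from mtc->gfp_mask instead.
	 */
	if (PageHuge(page)) {
		struct hstate *h = page_hstate(compound_head(page));

		return alloc_huge_page_nodemask(h, mtc->nid, NULL,
				htlb_alloc_mask(h) | __GFP_THISNODE);
	}

	if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_pages_node(mtc->nid,
				GFP_TRANSHUGE | __GFP_THISNODE,
				HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	}

	return __alloc_pages_node(mtc->nid, mtc->gfp_mask, 0);
}

Once the placement policy is expressed through migration_target_control, per-caller wrappers such as alloc_new_node_page() have nothing left to do, which is why this patch can delete it.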
--
2.7.4