Message-Id: <1592892828-1934-7-git-send-email-iamjoonsoo.kim@lge.com>
Date: Tue, 23 Jun 2020 15:13:46 +0900
From: js1304@...il.com
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
kernel-team@....com, Vlastimil Babka <vbabka@...e.cz>,
Christoph Hellwig <hch@...radead.org>,
Roman Gushchin <guro@...com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Naoya Horiguchi <n-horiguchi@...jp.nec.com>,
Michal Hocko <mhocko@...e.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>
Subject: [PATCH v3 6/8] mm/gup: use a standard migration target allocation callback
From: Joonsoo Kim <iamjoonsoo.kim@....com>
There is a well-defined migration target allocation callback. It is
mostly the same as new_non_cma_page(), except that it lacks the CMA
handling. This patch adds the CMA consideration to the standard
migration target allocation callback and uses it in gup.c.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@....com>
---
mm/gup.c | 57 ++++++++-------------------------------------------------
mm/internal.h | 1 +
mm/migrate.c | 4 +++-
3 files changed, 12 insertions(+), 50 deletions(-)
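For reviewers, a condensed sketch (illustration only, not part of the
change) of how the consolidated alloc_migration_target() avoids CMA
once skip_cma is set, using only the names from this patch; the THP
path is omitted for brevity, it clears __GFP_MOVABLE the same way:

	struct migration_target_control *mtc = (void *)private;
	gfp_t gfp_mask = mtc->gfp_mask;

	if (PageHuge(page))
		/* avoid dequeueing a (possibly CMA backed) pool page */
		return alloc_huge_page_nodemask(
				page_hstate(compound_head(page)), mtc->nid,
				mtc->nmask, gfp_mask, mtc->skip_cma);

	if (mtc->skip_cma)
		/* CMA pageblocks only satisfy movable allocations */
		gfp_mask &= ~__GFP_MOVABLE;

	return __alloc_pages_nodemask(gfp_mask, 0, mtc->nid, mtc->nmask);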
diff --git a/mm/gup.c b/mm/gup.c
index 15be281..f6124e3 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1608,56 +1608,15 @@ static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
}
#ifdef CONFIG_CMA
-static struct page *new_non_cma_page(struct page *page, unsigned long private)
+static struct page *alloc_migration_target_non_cma(struct page *page, unsigned long private)
{
- /*
- * We want to make sure we allocate the new page from the same node
- * as the source page.
- */
- int nid = page_to_nid(page);
- /*
- * Trying to allocate a page for migration. Ignore allocation
- * failure warnings. We don't force __GFP_THISNODE here because
- * this node here is the node where we have CMA reservation and
- * in some case these nodes will have really less non movable
- * allocation memory.
- */
- gfp_t gfp_mask = GFP_USER | __GFP_NOWARN;
-
- if (PageHighMem(page))
- gfp_mask |= __GFP_HIGHMEM;
-
-#ifdef CONFIG_HUGETLB_PAGE
- if (PageHuge(page)) {
- struct hstate *h = page_hstate(page);
-
- /*
- * We don't want to dequeue from the pool because pool pages will
- * mostly be from the CMA region.
- */
- return alloc_huge_page_nodemask(h, nid, NULL, gfp_mask, true);
- }
-#endif
- if (PageTransHuge(page)) {
- struct page *thp;
- /*
- * ignore allocation failure warnings
- */
- gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
-
- /*
- * Remove the movable mask so that we don't allocate from
- * CMA area again.
- */
- thp_gfpmask &= ~__GFP_MOVABLE;
- thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
- if (!thp)
- return NULL;
- prep_transhuge_page(thp);
- return thp;
- }
+ struct migration_target_control mtc = {
+ .nid = page_to_nid(page),
+ .gfp_mask = GFP_USER | __GFP_NOWARN,
+ .skip_cma = true,
+ };
- return __alloc_pages_node(nid, gfp_mask, 0);
+ return alloc_migration_target(page, (unsigned long)&mtc);
}
static long check_and_migrate_cma_pages(struct task_struct *tsk,
@@ -1719,7 +1678,7 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
for (i = 0; i < nr_pages; i++)
put_page(pages[i]);
- if (migrate_pages(&cma_page_list, new_non_cma_page,
+ if (migrate_pages(&cma_page_list, alloc_migration_target_non_cma,
NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
/*
* some of the pages failed migration. Do get_user_pages
diff --git a/mm/internal.h b/mm/internal.h
index f725aa8..fb7f7fe 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -619,6 +619,7 @@ struct migration_target_control {
int nid; /* preferred node id */
nodemask_t *nmask;
gfp_t gfp_mask;
+ bool skip_cma;
};
#endif /* __MM_INTERNAL_H */
diff --git a/mm/migrate.c b/mm/migrate.c
index 3afff59..7c4cd74 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1550,7 +1550,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
if (PageHuge(page)) {
return alloc_huge_page_nodemask(
page_hstate(compound_head(page)), mtc->nid,
- mtc->nmask, gfp_mask, false);
+ mtc->nmask, gfp_mask, mtc->skip_cma);
}
if (PageTransHuge(page)) {
@@ -1561,6 +1561,8 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
zidx = zone_idx(page_zone(page));
if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
gfp_mask |= __GFP_HIGHMEM;
+ if (mtc->skip_cma)
+ gfp_mask &= ~__GFP_MOVABLE;
new_page = __alloc_pages_nodemask(gfp_mask, order,
mtc->nid, mtc->nmask);
--
2.7.4