Message-Id: <20170309062639.31059-1-khandual@linux.vnet.ibm.com>
Date: Thu, 9 Mar 2017 11:56:39 +0530
From: Anshuman Khandual <khandual@...ux.vnet.ibm.com>
To: linux-kernel@...r.kernel.org, linux-mm@...ck.org
Cc: mhocko@...e.com, vbabka@...e.cz, mgorman@...e.de,
minchan@...nel.org, aneesh.kumar@...ux.vnet.ibm.com,
bsingharora@...il.com, srikar@...ux.vnet.ibm.com,
haren@...ux.vnet.ibm.com, jglisse@...hat.com,
dave.hansen@...el.com, dan.j.williams@...el.com,
zi.yan@...rutgers.edu
Subject: [PATCH 5/6] mm/migrate: Add new migration flag MPOL_MF_MOVE_MT for syscalls
From: Zi Yan <ziy@...dia.com>
This change adds a new mode flag MPOL_MF_MOVE_MT for migration system
calls such as move_pages() and mbind(), which indicates a request to use
the multi-threaded page copy method.
Signed-off-by: Zi Yan <zi.yan@...rutgers.edu>
Signed-off-by: Anshuman Khandual <khandual@...ux.vnet.ibm.com>
---
* Updated include/linux/migrate_mode.h comment as per Naoya
 include/uapi/linux/mempolicy.h |  4 +++-
 mm/mempolicy.c                 |  7 ++++++-
 mm/migrate.c                   | 14 ++++++++++----
 3 files changed, 19 insertions(+), 6 deletions(-)
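
For illustration only (not part of this patch): a minimal, untested
userspace sketch of how the new flag could be passed to mbind() and
move_pages() through the numaif.h wrappers. MPOL_MF_MOVE_MT is defined
locally because it is not in the installed uapi headers yet; its value
mirrors the mempolicy.h hunk below, and the target node (node 1) is an
arbitrary assumption.

#define _GNU_SOURCE
#include <numaif.h>             /* mbind(), move_pages(), MPOL_* flags */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#ifndef MPOL_MF_MOVE_MT
#define MPOL_MF_MOVE_MT (1 << 6)        /* mirrors the uapi addition below */
#endif

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        unsigned long nodemask = 1UL << 1;      /* target node 1, assumed to exist */
        void *pages[1];
        int nodes[1] = { 1 };
        int status[1] = { -1 };
        void *buf;

        if (posix_memalign(&buf, page_size, page_size))
                return 1;
        memset(buf, 0, page_size);              /* fault the page in */
        pages[0] = buf;

        /* mbind() path: bind the range to node 1, requesting multi-threaded copy */
        if (mbind(buf, page_size, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
                  MPOL_MF_MOVE | MPOL_MF_MOVE_MT))
                perror("mbind");

        /* move_pages() path: migrate this page of the calling process to node 1 */
        if (move_pages(0, 1, pages, nodes, status,
                       MPOL_MF_MOVE | MPOL_MF_MOVE_MT) < 0)
                perror("move_pages");
        else
                printf("status[0] = %d\n", status[0]);

        free(buf);
        return 0;
}

Link with -lnuma for the numaif.h wrappers; on a kernel without this
series the extra flag is simply rejected with -EINVAL.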
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 9cd8b21..8f1db2e 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -53,10 +53,12 @@ enum mpol_rebind_step {
 #define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to policy */
 #define MPOL_MF_LAZY     (1<<3) /* Modifies '_MOVE: lazy migrate on fault */
 #define MPOL_MF_INTERNAL (1<<4) /* Internal flags start here */
+#define MPOL_MF_MOVE_MT  (1<<6) /* Use multi-threaded page copy routine */
 
 #define MPOL_MF_VALID   (MPOL_MF_STRICT   |     \
                          MPOL_MF_MOVE     |     \
-                         MPOL_MF_MOVE_ALL)
+                         MPOL_MF_MOVE_ALL |     \
+                         MPOL_MF_MOVE_MT)
 
 /*
  * Internal flags that share the struct mempolicy flags word with
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d880dc6..2d06ee2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1300,9 +1300,14 @@ static long do_mbind(unsigned long start, unsigned long len,
                int nr_failed = 0;
 
                if (!list_empty(&pagelist)) {
+                       enum migrate_mode mode = MIGRATE_SYNC;
+
+                       if (flags & MPOL_MF_MOVE_MT)
+                               mode |= MIGRATE_MT;
+
                        WARN_ON_ONCE(flags & MPOL_MF_LAZY);
                        nr_failed = migrate_pages(&pagelist, new_page, NULL,
-                               start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+                               start, mode, MR_MEMPOLICY_MBIND);
                        if (nr_failed)
                                putback_movable_pages(&pagelist);
                }
diff --git a/mm/migrate.c b/mm/migrate.c
index 187065e..7449f7d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1481,11 +1481,16 @@ static struct page *new_page_node(struct page *p, unsigned long private,
  */
 static int do_move_page_to_node_array(struct mm_struct *mm,
                                       struct page_to_node *pm,
-                                      int migrate_all)
+                                      int migrate_all,
+                                      int migrate_use_mt)
 {
        int err;
        struct page_to_node *pp;
        LIST_HEAD(pagelist);
+       enum migrate_mode mode = MIGRATE_SYNC;
+
+       if (migrate_use_mt)
+               mode |= MIGRATE_MT;
 
        down_read(&mm->mmap_sem);
@@ -1562,7 +1567,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
        err = 0;
        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_page_node, NULL,
-                               (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
+                               (unsigned long)pm, mode, MR_SYSCALL);
                if (err)
                        putback_movable_pages(&pagelist);
        }
@@ -1639,7 +1644,8 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 
                /* Migrate this chunk */
                err = do_move_page_to_node_array(mm, pm,
-                                               flags & MPOL_MF_MOVE_ALL);
+                                               flags & MPOL_MF_MOVE_ALL,
+                                               flags & MPOL_MF_MOVE_MT);
                if (err < 0)
                        goto out_pm;
@@ -1746,7 +1752,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
        nodemask_t task_nodes;
 
        /* Check flags */
-       if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
+       if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL|MPOL_MF_MOVE_MT))
                return -EINVAL;
 
        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
--
2.1.4