Message-Id: <20161031100228.17917-3-lstoakes@gmail.com>
Date: Mon, 31 Oct 2016 10:02:28 +0000
From: Lorenzo Stoakes <lstoakes@...il.com>
To: linux-mm@...ck.org
Cc: Linus Torvalds <torvalds@...ux-foundation.org>,
Michal Hocko <mhocko@...nel.org>, Jan Kara <jack@...e.cz>,
Hugh Dickins <hughd@...gle.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Rik van Riel <riel@...hat.com>,
Mel Gorman <mgorman@...hsingularity.net>,
Andrew Morton <akpm@...ux-foundation.org>,
Paolo Bonzini <pbonzini@...hat.com>,
linux-kernel@...r.kernel.org, linux-cris-kernel@...s.com,
linux-ia64@...r.kernel.org, dri-devel@...ts.freedesktop.org,
linux-rdma@...r.kernel.org, kvm@...r.kernel.org,
linux-media@...r.kernel.org, devel@...verdev.osuosl.org,
Lorenzo Stoakes <lstoakes@...il.com>
Subject: [PATCH 2/2] mm: remove get_user_pages_locked()

get_user_pages() now has an int *locked parameter which renders
get_user_pages_locked() redundant, so remove it.

This patch should not introduce any functional changes.
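For callers of the removed function the conversion is mechanical: keep the
existing locked pointer and pass NULL for get_user_pages()'s vmas argument.
An illustrative caller-side sketch (variable names are placeholders; the real
conversion is visible in the frame_vector.c hunk below):

    int locked = 1;

    down_read(&mm->mmap_sem);

    /* before */
    ret = get_user_pages_locked(start, nr_pages, gup_flags, pages, &locked);

    /* after */
    ret = get_user_pages(start, nr_pages, gup_flags, pages, NULL, &locked);

    /* either variant may drop mmap_sem on fault retry */
    if (locked)
        up_read(&mm->mmap_sem);
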
Signed-off-by: Lorenzo Stoakes <lstoakes@...il.com>
---
include/linux/mm.h | 2 --
mm/frame_vector.c | 4 ++--
mm/gup.c | 56 +++++++++++++++++++-----------------------------------
mm/nommu.c | 8 --------
4 files changed, 22 insertions(+), 48 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 396984e..4db3147 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1278,8 +1278,6 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
long get_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked);
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index db77dcb..c5ce1b1 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -55,8 +55,8 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
vec->got_ref = true;
vec->is_pfns = false;
- ret = get_user_pages_locked(start, nr_frames,
- gup_flags, (struct page **)(vec->ptrs), &locked);
+ ret = get_user_pages(start, nr_frames,
+ gup_flags, (struct page **)(vec->ptrs), NULL, &locked);
goto out;
}
diff --git a/mm/gup.c b/mm/gup.c
index 6b5539e..6cec693 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -826,37 +826,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
}
/*
- * We can leverage the VM_FAULT_RETRY functionality in the page fault
- * paths better by using either get_user_pages_locked() or
- * get_user_pages_unlocked().
- *
- * get_user_pages_locked() is suitable to replace the form:
- *
- * down_read(&mm->mmap_sem);
- * do_something()
- * get_user_pages(tsk, mm, ..., pages, NULL, NULL);
- * up_read(&mm->mmap_sem);
- *
- * to:
- *
- * int locked = 1;
- * down_read(&mm->mmap_sem);
- * do_something()
- * get_user_pages_locked(tsk, mm, ..., pages, &locked);
- * if (locked)
- * up_read(&mm->mmap_sem);
- */
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- int *locked)
-{
- return __get_user_pages_locked(current, current->mm, start, nr_pages,
- pages, NULL, locked, true,
- gup_flags | FOLL_TOUCH);
-}
-EXPORT_SYMBOL(get_user_pages_locked);
-
-/*
* Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
* pass additional gup_flags as last parameter (like FOLL_HWPOISON).
*
@@ -954,11 +923,6 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
* use the correct cache flushing APIs.
*
* See also get_user_pages_fast, for performance critical applications.
- *
- * get_user_pages should be phased out in favor of
- * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
- * should use get_user_pages because it cannot pass
- * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
*/
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
@@ -976,6 +940,26 @@ EXPORT_SYMBOL(get_user_pages_remote);
* less-flexible calling convention where we assume that the task
* and mm being operated on are the current task's. We also
* obviously don't pass FOLL_REMOTE in here.
+ *
+ * We can leverage the VM_FAULT_RETRY functionality in the page fault
+ * paths better by using either get_user_pages()'s locked parameter or
+ * get_user_pages_unlocked().
+ *
+ * get_user_pages()'s locked parameter is suitable to replace the form:
+ *
+ * down_read(&mm->mmap_sem);
+ * do_something()
+ * get_user_pages(..., pages, NULL, NULL);
+ * up_read(&mm->mmap_sem);
+ *
+ * to:
+ *
+ * int locked = 1;
+ * down_read(&mm->mmap_sem);
+ * do_something()
+ * get_user_pages(..., pages, NULL, &locked);
+ * if (locked)
+ * up_read(&mm->mmap_sem);
*/
long get_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
diff --git a/mm/nommu.c b/mm/nommu.c
index 82aaa33..3d38d40 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -168,14 +168,6 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
}
EXPORT_SYMBOL(get_user_pages);
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- int *locked)
-{
- return get_user_pages(start, nr_pages, gup_flags, pages, NULL, NULL);
-}
-EXPORT_SYMBOL(get_user_pages_locked);
-
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags)
--
2.10.1