Message-Id: <20181214115708.360510296@linuxfoundation.org>
Date: Fri, 14 Dec 2018 13:00:49 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Lorenzo Stoakes <lstoakes@...il.com>,
Paolo Bonzini <pbonzini@...hat.com>, Jan Kara <jack@...e.cz>,
Michal Hocko <mhocko@...e.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Ben Hutchings <ben.hutchings@...ethink.co.uk>
Subject: [PATCH 4.4 75/88] mm: remove write/force parameters from __get_user_pages_unlocked()
4.4-stable review patch. If anyone has any objections, please let me know.
------------------
From: Lorenzo Stoakes <lstoakes@...il.com>
commit d4944b0ecec0af882483fe44b66729316e575208 upstream.
This removes the redundant 'write' and 'force' parameters from
__get_user_pages_unlocked() to make the use of FOLL_FORCE explicit in
callers as use of this flag can result in surprising behaviour (and
hence bugs) within the mm subsystem.
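For illustration only (not part of the patch): a minimal before/after
sketch of the call-site conversion, using hypothetical local variables
(tsk, mm, start, nr_pages, pages, npages); the __get_user_pages_unlocked()
signatures and FOLL_* flags match the hunks below.

	/* Before: writability and forcing are separate int parameters,
	 * so FOLL_FORCE is implicit and easy to miss at the call site. */
	npages = __get_user_pages_unlocked(tsk, mm, start, nr_pages,
					   1 /* write */, 0 /* force */,
					   pages, FOLL_TOUCH);

	/* After: the caller builds gup_flags itself, making FOLL_WRITE
	 * (and, where needed, FOLL_FORCE) explicit at every call site. */
	unsigned int flags = FOLL_TOUCH | FOLL_WRITE;

	npages = __get_user_pages_unlocked(tsk, mm, start, nr_pages,
					   pages, flags);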
Signed-off-by: Lorenzo Stoakes <lstoakes@...il.com>
Acked-by: Paolo Bonzini <pbonzini@...hat.com>
Reviewed-by: Jan Kara <jack@...e.cz>
Acked-by: Michal Hocko <mhocko@...e.com>
Signed-off-by: Linus Torvalds <torvalds@...ux-foundation.org>
[bwh: Backported to 4.4:
 - Defer changes in process_vm_rw_single_vec() and async_pf_execute(), since
   in this tree they use get_user_pages_unlocked()
 - Adjust context]
Signed-off-by: Ben Hutchings <ben.hutchings@...ethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
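A note on the deferral above: in 4.4, get_user_pages_unlocked() keeps its
(write, force) signature and, as the mm/gup.c hunk below shows, folds the
two ints into gup_flags internally, so callers such as
process_vm_rw_single_vec() and async_pf_execute() can stay untouched. A
rough sketch of such an unchanged caller (hypothetical locals):

	/* Unchanged 4.4-style caller: write/force still passed as ints;
	 * the wrapper translates them to FOLL_WRITE/FOLL_FORCE. */
	pinned = get_user_pages_unlocked(task, mm, start, nr_pages,
					 1 /* write */, 0 /* force */,
					 pages);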
---
include/linux/mm.h | 3 +--
mm/gup.c | 19 ++++++++++---------
mm/nommu.c | 14 ++++++++++----
virt/kvm/kvm_main.c | 11 ++++++++---
4 files changed, 29 insertions(+), 18 deletions(-)
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1207,8 +1207,7 @@ long get_user_pages_locked(struct task_s
int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- unsigned int gup_flags);
+ struct page **pages, unsigned int gup_flags);
long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages);
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -764,17 +764,11 @@ EXPORT_SYMBOL(get_user_pages_locked);
*/
__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- unsigned int gup_flags)
+ struct page **pages, unsigned int gup_flags)
{
long ret;
int locked = 1;
- if (write)
- gup_flags |= FOLL_WRITE;
- if (force)
- gup_flags |= FOLL_FORCE;
-
down_read(&mm->mmap_sem);
ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
&locked, false, gup_flags);
@@ -805,8 +799,15 @@ long get_user_pages_unlocked(struct task
unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages)
{
- return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
- force, pages, FOLL_TOUCH);
+ unsigned int flags = FOLL_TOUCH;
+
+ if (write)
+ flags |= FOLL_WRITE;
+ if (force)
+ flags |= FOLL_FORCE;
+
+ return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
+ pages, flags);
}
EXPORT_SYMBOL(get_user_pages_unlocked);
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -211,8 +211,7 @@ EXPORT_SYMBOL(get_user_pages_locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- unsigned int gup_flags)
+ struct page **pages, unsigned int gup_flags)
{
long ret;
down_read(&mm->mmap_sem);
@@ -227,8 +226,15 @@ long get_user_pages_unlocked(struct task
unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages)
{
- return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
- force, pages, 0);
+ unsigned int flags = 0;
+
+ if (write)
+ flags |= FOLL_WRITE;
+ if (force)
+ flags |= FOLL_FORCE;
+
+ return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
+ pages, flags);
}
EXPORT_SYMBOL(get_user_pages_unlocked);
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1352,10 +1352,15 @@ static int hva_to_pfn_slow(unsigned long
npages = get_user_page_nowait(current, current->mm,
addr, write_fault, page);
up_read(&current->mm->mmap_sem);
- } else
+ } else {
+ unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
+
+ if (write_fault)
+ flags |= FOLL_WRITE;
+
npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
- write_fault, 0, page,
- FOLL_TOUCH|FOLL_HWPOISON);
+ page, flags);
+ }
if (npages != 1)
return npages;