Message-Id: <20181006024949.20691-2-jhubbard@nvidia.com>
Date: Fri, 5 Oct 2018 19:49:47 -0700
From: john.hubbard@...il.com
To: Matthew Wilcox <willy@...radead.org>,
Michal Hocko <mhocko@...nel.org>,
Christopher Lameter <cl@...ux.com>,
Jason Gunthorpe <jgg@...pe.ca>,
Dan Williams <dan.j.williams@...el.com>,
Jan Kara <jack@...e.cz>
Cc: linux-mm@...ck.org, LKML <linux-kernel@...r.kernel.org>,
linux-rdma <linux-rdma@...r.kernel.org>,
linux-fsdevel@...r.kernel.org, John Hubbard <jhubbard@...dia.com>
Subject: [PATCH v3 1/3] mm: get_user_pages: consolidate error handling
From: John Hubbard <jhubbard@...dia.com>
An upcoming patch requires a way to operate on each page that
any of the get_user_pages_*() variants returns.

In preparation for that, consolidate the error handling for
__get_user_pages(). This provides a single location (the "out:" label)
for operating on the collected set of pages that are about to be returned.

As long as every use of the "ret" variable is being edited anyway, rename
"ret" --> "err", so that its name matches its true role.

This also gets rid of two shadowed variable declarations, as a
tiny beneficial side effect.
Reviewed-by: Jan Kara <jack@...e.cz>
Signed-off-by: John Hubbard <jhubbard@...dia.com>
---
mm/gup.c | 37 ++++++++++++++++++++++---------------
1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index 1abc8b4afff6..05ee7c18e59a 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -660,6 +660,7 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
struct vm_area_struct **vmas, int *nonblocking)
{
long i = 0;
+ int err = 0;
unsigned int page_mask;
struct vm_area_struct *vma = NULL;
@@ -685,18 +686,19 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (!vma || start >= vma->vm_end) {
vma = find_extend_vma(mm, start);
if (!vma && in_gate_area(mm, start)) {
- int ret;
- ret = get_gate_page(mm, start & PAGE_MASK,
+ err = get_gate_page(mm, start & PAGE_MASK,
gup_flags, &vma,
pages ? &pages[i] : NULL);
- if (ret)
- return i ? : ret;
+ if (err)
+ goto out;
page_mask = 0;
goto next_page;
}
- if (!vma || check_vma_flags(vma, gup_flags))
- return i ? : -EFAULT;
+ if (!vma || check_vma_flags(vma, gup_flags)) {
+ err = -EFAULT;
+ goto out;
+ }
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i,
@@ -709,23 +711,25 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
* If we have a pending SIGKILL, don't keep faulting pages and
* potentially allocating memory.
*/
- if (unlikely(fatal_signal_pending(current)))
- return i ? i : -ERESTARTSYS;
+ if (unlikely(fatal_signal_pending(current))) {
+ err = -ERESTARTSYS;
+ goto out;
+ }
cond_resched();
page = follow_page_mask(vma, start, foll_flags, &page_mask);
if (!page) {
- int ret;
- ret = faultin_page(tsk, vma, start, &foll_flags,
+ err = faultin_page(tsk, vma, start, &foll_flags,
nonblocking);
- switch (ret) {
+ switch (err) {
case 0:
goto retry;
case -EFAULT:
case -ENOMEM:
case -EHWPOISON:
- return i ? i : ret;
+ goto out;
case -EBUSY:
- return i;
+ err = 0;
+ goto out;
case -ENOENT:
goto next_page;
}
@@ -737,7 +741,8 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
*/
goto next_page;
} else if (IS_ERR(page)) {
- return i ? i : PTR_ERR(page);
+ err = PTR_ERR(page);
+ goto out;
}
if (pages) {
pages[i] = page;
@@ -757,7 +762,9 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
start += page_increm * PAGE_SIZE;
nr_pages -= page_increm;
} while (nr_pages);
- return i;
+
+out:
+ return i ? i : err;
}
static bool vma_permits_fault(struct vm_area_struct *vma,
--
2.19.0