Message-ID: <1-v1-281e425c752f+2df-gup_fork_jgg@nvidia.com>
Date: Fri, 23 Oct 2020 21:19:19 -0300
From: Jason Gunthorpe <jgg@...dia.com>
To: <linux-kernel@...r.kernel.org>
CC: Andrea Arcangeli <aarcange@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Christoph Hellwig <hch@....de>,
Hugh Dickins <hughd@...gle.com>, Jan Kara <jack@...e.cz>,
Jann Horn <jannh@...gle.com>,
John Hubbard <jhubbard@...dia.com>,
Kirill Shutemov <kirill@...temov.name>,
Kirill Tkhai <ktkhai@...tuozzo.com>,
Linux-MM <linux-mm@...ck.org>, Michal Hocko <mhocko@...e.com>,
Oleg Nesterov <oleg@...hat.com>, Peter Xu <peterx@...hat.com>
Subject: [PATCH 1/2] mm: reorganize internal_get_user_pages_fast()
The next patch in this series makes the lockless flow a little more
complex, so move the entire block into a new function and remove a level
of indentation. Tidy a bit of cruft:
- addr is always the same as start, so use start
- Use the modern check_add_overflow() for computing end = start + len
  (see the sketch after the --- marker below)
- nr_pinned << PAGE_SHIFT needs an unsigned long cast, like nr_pages
- The handling of ret and nr_pinned can be streamlined a bit
No functional change.
Signed-off-by: Jason Gunthorpe <jgg@...dia.com>
---
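A quick userspace sketch of the overflow check and the shift cast (an
illustration only, not part of the patch: check_add_overflow() from
include/linux/overflow.h ultimately wraps the __builtin_add_overflow()
primitive used here on compilers that provide it, and PAGE_SHIFT is
assumed to be 12):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed; matches most architectures */

    int main(void)
    {
            unsigned long start = -(2UL << PAGE_SHIFT); /* two pages below the top */
            unsigned long len = 4UL << PAGE_SHIFT;      /* four pages */
            int nr_pinned = 3;
            unsigned long end;

            /* Returns true when start + len wraps, replacing the old
             * end <= start check. */
            if (__builtin_add_overflow(start, len, &end))
                    printf("start + len overflows\n");

            /*
             * Without the cast the shift happens in int arithmetic and
             * can overflow for a large pin count; the cast widens to
             * unsigned long first, matching how nr_pages is handled.
             */
            printf("pinned bytes: %#lx\n",
                   (unsigned long)nr_pinned << PAGE_SHIFT);
            return 0;
    }

With start that close to the top of the address space, the builtin
reports the wraparound that the old end <= start test was catching.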
mm/gup.c | 88 +++++++++++++++++++++++++++++---------------------------
1 file changed, 46 insertions(+), 42 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index 102877ed77a4b4..ecbe1639ea2af7 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2671,13 +2671,42 @@ static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
return ret;
}
+static unsigned int lockless_pages_from_mm(unsigned long addr,
+ unsigned long end,
+ unsigned int gup_flags,
+ struct page **pages)
+{
+ unsigned long flags;
+ int nr_pinned = 0;
+
+ if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
+ !gup_fast_permitted(addr, end))
+ return 0;
+
+ /*
+ * Disable interrupts. The nested form is used, in order to allow full,
+ * general purpose use of this routine.
+ *
+ * With interrupts disabled, we block page table pages from being freed
+ * from under us. See struct mmu_table_batch comments in
+ * include/asm-generic/tlb.h for more details.
+ *
+ * We do not adopt an rcu_read_lock(.) here as we also want to block
+ * IPIs that come from THPs splitting.
+ */
+ local_irq_save(flags);
+ gup_pgd_range(addr, end, gup_flags, pages, &nr_pinned);
+ local_irq_restore(flags);
+ return nr_pinned;
+}
+
static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags,
struct page **pages)
{
- unsigned long addr, len, end;
- unsigned long flags;
- int nr_pinned = 0, ret = 0;
+ unsigned long len, end;
+ unsigned int nr_pinned;
+ int ret;
if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
FOLL_FORCE | FOLL_PIN | FOLL_GET |
@@ -2691,53 +2720,28 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
might_lock_read(&current->mm->mmap_lock);
start = untagged_addr(start) & PAGE_MASK;
- addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
-
- if (end <= start)
+ if (check_add_overflow(start, len, &end))
return 0;
if (unlikely(!access_ok((void __user *)start, len)))
return -EFAULT;
- /*
- * Disable interrupts. The nested form is used, in order to allow
- * full, general purpose use of this routine.
- *
- * With interrupts disabled, we block page table pages from being
- * freed from under us. See struct mmu_table_batch comments in
- * include/asm-generic/tlb.h for more details.
- *
- * We do not adopt an rcu_read_lock(.) here as we also want to
- * block IPIs that come from THPs splitting.
- */
- if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) && gup_fast_permitted(start, end)) {
- unsigned long fast_flags = gup_flags;
-
- local_irq_save(flags);
- gup_pgd_range(addr, end, fast_flags, pages, &nr_pinned);
- local_irq_restore(flags);
- ret = nr_pinned;
- }
-
- if (nr_pinned < nr_pages && !(gup_flags & FOLL_FAST_ONLY)) {
- /* Try to get the remaining pages with get_user_pages */
- start += nr_pinned << PAGE_SHIFT;
- pages += nr_pinned;
-
- ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned,
- gup_flags, pages);
+ nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
+ if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
+ return nr_pinned;
+ /* Try to get the remaining pages with get_user_pages */
+ start += (unsigned long)nr_pinned << PAGE_SHIFT;
+ pages += nr_pinned;
+ ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
+ pages);
+ if (ret < 0) {
/* Have to be a bit careful with return values */
- if (nr_pinned > 0) {
- if (ret < 0)
- ret = nr_pinned;
- else
- ret += nr_pinned;
- }
+ if (nr_pinned)
+ return nr_pinned;
+ return ret;
}
-
- return ret;
+ return ret + nr_pinned;
}
/**
* get_user_pages_fast_only() - pin user pages in memory
--
2.28.0