Message-Id: <1497939652-16528-5-git-send-email-rppt@linux.vnet.ibm.com>
Date: Tue, 20 Jun 2017 09:20:49 +0300
From: Mike Rapoport <rppt@...ux.vnet.ibm.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Andrea Arcangeli <aarcange@...hat.com>,
Hugh Dickins <hughd@...gle.com>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
Hillf Danton <hillf.zj@...baba-inc.com>,
Pavel Emelyanov <xemul@...tuozzo.com>,
linux mm <linux-mm@...ck.org>,
lkml <linux-kernel@...r.kernel.org>,
Mike Rapoport <rppt@...ux.vnet.ibm.com>
Subject: [PATCH 4/7] userfaultfd: mcopy_atomic: introduce mfill_atomic_pte helper

Shuffle the code a bit to improve readability: pull the copy vs.
zeropage dispatch for anonymous and shmem VMAs out of the
__mcopy_atomic() loop into a new mfill_atomic_pte() helper.

Signed-off-by: Mike Rapoport <rppt@...ux.vnet.ibm.com>
---
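Note for reviewers, not part of the commit message: the two flavours
this helper dispatches on correspond to the UFFDIO_COPY and
UFFDIO_ZEROPAGE ioctls; userfaultfd_copy() reaches __mcopy_atomic()
with zeropage == false and userfaultfd_zeropage() with zeropage ==
true. A minimal userspace sketch of the two request types follows --
the resolve_fault() wrapper name is made up for illustration, and
'uffd' is assumed to be a userfaultfd already registered over the
faulting range:

#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/*
 * Hypothetical helper: resolve a fault at 'dst' either by copying
 * 'len' bytes from the local buffer at 'src' (UFFDIO_COPY, the
 * !zeropage path) or by installing zero pages over the range
 * (UFFDIO_ZEROPAGE, the zeropage path).  Returns 0 on success,
 * -1 with errno set otherwise.
 */
static int resolve_fault(int uffd, unsigned long dst, unsigned long src,
			 unsigned long len, int use_zeropage)
{
	if (!use_zeropage) {
		struct uffdio_copy copy = {
			.dst = dst,	/* page-aligned faulting address */
			.src = src,	/* source buffer in our own mm */
			.len = len,
			.mode = 0,
		};
		return ioctl(uffd, UFFDIO_COPY, &copy);
	} else {
		struct uffdio_zeropage zp = {
			.range = { .start = dst, .len = len },
			.mode = 0,
		};
		return ioctl(uffd, UFFDIO_ZEROPAGE, &zp);
	}
}

On a non-anonymous (shmem) VMA the zeropage flavour keeps failing
with -EINVAL, which is the behaviour the new helper preserves.
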
mm/userfaultfd.c | 46 ++++++++++++++++++++++++++++++----------------
1 file changed, 30 insertions(+), 16 deletions(-)

diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 8bcb501..48c015c 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -371,6 +371,34 @@ extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 				      bool zeropage);
 #endif /* CONFIG_HUGETLB_PAGE */
 
+static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
+						pmd_t *dst_pmd,
+						struct vm_area_struct *dst_vma,
+						unsigned long dst_addr,
+						unsigned long src_addr,
+						struct page **page,
+						bool zeropage)
+{
+	ssize_t err;
+
+	if (vma_is_anonymous(dst_vma)) {
+		if (!zeropage)
+			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
+					       dst_addr, src_addr, page);
+		else
+			err = mfill_zeropage_pte(dst_mm, dst_pmd,
+						 dst_vma, dst_addr);
+	} else {
+		err = -EINVAL; /* if zeropage is true return -EINVAL */
+		if (likely(!zeropage))
+			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
+						     dst_vma, dst_addr,
+						     src_addr, page);
+	}
+
+	return err;
+}
+
 static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 					      unsigned long dst_start,
 					      unsigned long src_start,
@@ -487,22 +515,8 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 		BUG_ON(pmd_none(*dst_pmd));
 		BUG_ON(pmd_trans_huge(*dst_pmd));
 
-		if (vma_is_anonymous(dst_vma)) {
-			if (!zeropage)
-				err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
-						       dst_addr, src_addr,
-						       &page);
-			else
-				err = mfill_zeropage_pte(dst_mm, dst_pmd,
-							 dst_vma, dst_addr);
-		} else {
-			err = -EINVAL; /* if zeropage is true return -EINVAL */
-			if (likely(!zeropage))
-				err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
-							     dst_vma, dst_addr,
-							     src_addr, &page);
-		}
-
+		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+				       src_addr, &page, zeropage);
 		cond_resched();
 
 		if (unlikely(err == -EFAULT)) {
--
2.7.4