Message-Id: <20210715201422.211004-3-peterx@redhat.com>
Date: Thu, 15 Jul 2021 16:13:58 -0400
From: Peter Xu <peterx@...hat.com>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc: Jason Gunthorpe <jgg@...pe.ca>,
Mike Kravetz <mike.kravetz@...cle.com>,
David Hildenbrand <david@...hat.com>,
Alistair Popple <apopple@...dia.com>,
Matthew Wilcox <willy@...radead.org>,
"Kirill A . Shutemov" <kirill@...temov.name>,
Hugh Dickins <hughd@...gle.com>,
Tiberiu Georgescu <tiberiu.georgescu@...anix.com>,
Andrea Arcangeli <aarcange@...hat.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Nadav Amit <nadav.amit@...il.com>,
Mike Rapoport <rppt@...ux.vnet.ibm.com>,
Jerome Glisse <jglisse@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Miaohe Lin <linmiaohe@...wei.com>, peterx@...hat.com
Subject: [PATCH v5 02/26] shmem/userfaultfd: Take care of UFFDIO_COPY_MODE_WP

First, pass wp_copy into shmem_mfill_atomic_pte() through the stack.
Then apply the UFFD_WP bit properly when the UFFDIO_COPY on shmem is
requested with UFFDIO_COPY_MODE_WP; wp_copy finally lands in the
recently introduced mfill_atomic_install_pte().

We need to make sure shmem_mfill_atomic_pte() always sets the dirty bit
in the pte, even if UFFDIO_COPY_MODE_WP is set.  After the rework of the
shmem minor fault series, the logic there needs a slight touch-up, since
uffd-wp now has to be applied even if writable==false (e.g., for a shmem
private mapping).

Note: we must do pte_wrprotect() if !writable in
mfill_atomic_install_pte(), as mk_pte() could return a writable pte
(e.g., when VM_SHARED is set on a shmem file).

Signed-off-by: Peter Xu <peterx@...hat.com>
---
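
For reference, here is roughly what the new mode looks like from the
userspace side: a minimal, illustrative sketch (not part of the patch;
the helper name and error handling are made up), assuming a kernel with
this series applied and a uffd whose target shmem range was registered
with UFFDIO_REGISTER_MODE_MISSING | UFFDIO_REGISTER_MODE_WP:

#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/* Hypothetical helper: uffd, dst/src and page_size come from the caller. */
static int uffd_copy_page_wp(int uffd, unsigned long dst,
			     unsigned long src, unsigned long page_size)
{
	struct uffdio_copy copy = {
		.dst  = dst,
		.src  = src,
		.len  = page_size,
		/* This is what makes wp_copy == true on the kernel side */
		.mode = UFFDIO_COPY_MODE_WP,
	};

	if (ioctl(uffd, UFFDIO_COPY, &copy))
		return -1;

	/*
	 * The pte is installed already uffd-wp'ed: the next write to the
	 * page raises a uffd-wp fault rather than completing silently,
	 * until the range is un-protected with UFFDIO_WRITEPROTECT.
	 */
	return copy.copy == (__s64)page_size ? 0 : -1;
}

Resolving the copy and the write-protection in one atomic step avoids
the window that a separate UFFDIO_COPY + UFFDIO_WRITEPROTECT pair would
leave, where another thread could write to the page before the
protection is applied.
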
 include/linux/shmem_fs.h |  4 ++--
 mm/shmem.c               |  4 ++--
 mm/userfaultfd.c         | 23 +++++++++++++++--------
 3 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 8e775ce517bb..ba6ea8c4d534 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -128,11 +128,11 @@ extern int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 				  struct vm_area_struct *dst_vma,
 				  unsigned long dst_addr,
 				  unsigned long src_addr,
-				  bool zeropage,
+				  bool zeropage, bool wp_copy,
 				  struct page **pagep);
 #else /* !CONFIG_SHMEM */
 #define shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, \
-			       src_addr, zeropage, pagep) ({ BUG(); 0; })
+			       src_addr, zeropage, wp_copy, pagep) ({ BUG(); 0; })
 #endif /* CONFIG_SHMEM */
 #endif /* CONFIG_USERFAULTFD */
 
diff --git a/mm/shmem.c b/mm/shmem.c
index dc9f95b5fb34..9608200f8a64 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2368,7 +2368,7 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 			   struct vm_area_struct *dst_vma,
 			   unsigned long dst_addr,
 			   unsigned long src_addr,
-			   bool zeropage,
+			   bool zeropage, bool wp_copy,
 			   struct page **pagep)
 {
 	struct inode *inode = file_inode(dst_vma->vm_file);
@@ -2439,7 +2439,7 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 		goto out_release;
 
 	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
-				       page, true, false);
+				       page, true, wp_copy);
 	if (ret)
 		goto out_delete_from_cache;
 
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index b30a3724c701..2a9c9e6eb876 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -70,14 +70,22 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 
 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
 	_dst_pte = pte_mkdirty(_dst_pte);
-	if (page_in_cache && !vm_shared)
+	/* Don't write if uffd-wp wr-protected */
+	if (wp_copy) {
+		_dst_pte = pte_mkuffd_wp(_dst_pte);
 		writable = false;
-	if (writable) {
-		if (wp_copy)
-			_dst_pte = pte_mkuffd_wp(_dst_pte);
-		else
-			_dst_pte = pte_mkwrite(_dst_pte);
 	}
+	/* Don't write if page cache privately mapped */
+	if (page_in_cache && !vm_shared)
+		writable = false;
+	if (writable)
+		_dst_pte = pte_mkwrite(_dst_pte);
+	else
+		/*
+		 * We need this to make sure the write bit is removed, as
+		 * mk_pte() could return a pte with the write bit set.
+		 */
+		_dst_pte = pte_wrprotect(_dst_pte);
 
 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
 
@@ -467,11 +475,10 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
 			err = mfill_zeropage_pte(dst_mm, dst_pmd,
 						 dst_vma, dst_addr);
 	} else {
-		VM_WARN_ON_ONCE(wp_copy);
 		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
 					     dst_addr, src_addr,
 					     mode != MCOPY_ATOMIC_NORMAL,
-					     page);
+					     wp_copy, page);
 	}
 
 	return err;
--
2.31.1