Message-Id: <20210115170907.24498-5-peterx@redhat.com>
Date: Fri, 15 Jan 2021 12:08:41 -0500
From: Peter Xu <peterx@...hat.com>
To: linux-kernel@...r.kernel.org, linux-mm@...ck.org
Cc: Mike Rapoport <rppt@...ux.vnet.ibm.com>,
Mike Kravetz <mike.kravetz@...cle.com>, peterx@...hat.com,
Jerome Glisse <jglisse@...hat.com>,
"Kirill A . Shutemov" <kirill@...temov.name>,
Hugh Dickins <hughd@...gle.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Matthew Wilcox <willy@...radead.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Andrea Arcangeli <aarcange@...hat.com>,
Nadav Amit <nadav.amit@...il.com>
Subject: [PATCH RFC 04/30] shmem/userfaultfd: Take care of UFFDIO_COPY_MODE_WP
First, pass wp_copy into shmem_mfill_atomic_pte() through the stack. Then
apply the UFFD_WP bit properly when UFFDIO_COPY is requested on shmem with
UFFDIO_COPY_MODE_WP set.
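For context, a minimal userspace sketch (not part of this patch; error
handling omitted, and the memfd setup and names are illustrative only) of how
a monitor could install a write-protected page into a shmem mapping once this
series is applied:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <linux/userfaultfd.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);

		/* shmem-backed mapping, the case this patch covers */
		int memfd = memfd_create("uffd-wp-demo", 0);
		ftruncate(memfd, page);
		char *area = mmap(NULL, page, PROT_READ | PROT_WRITE,
				  MAP_SHARED, memfd, 0);

		int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
		struct uffdio_api api = { .api = UFFD_API };
		ioctl(uffd, UFFDIO_API, &api);

		/* wp registration on shmem needs this series applied */
		struct uffdio_register reg = {
			.range = { .start = (unsigned long)area, .len = page },
			.mode  = UFFDIO_REGISTER_MODE_MISSING |
				 UFFDIO_REGISTER_MODE_WP,
		};
		ioctl(uffd, UFFDIO_REGISTER, &reg);

		/* source buffer for the atomic copy */
		char *src = mmap(NULL, page, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		memset(src, 0x5a, page);

		/*
		 * Fill the missing page and keep it write-protected in one
		 * step: the pte installed by shmem_mfill_atomic_pte() gets
		 * the uffd-wp bit set (and, per this patch, the dirty bit).
		 */
		struct uffdio_copy copy = {
			.dst  = (unsigned long)area,
			.src  = (unsigned long)src,
			.len  = page,
			.mode = UFFDIO_COPY_MODE_WP,
		};
		ioctl(uffd, UFFDIO_COPY, &copy);
		return 0;
	}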
One thing to mention is that shmem_mfill_atomic_pte() needs to set the dirty
bit in the pte even if UFFDIO_COPY_MODE_WP is set. The reason is similar to
dcf7fe9d8976 ("userfaultfd: shmem: UFFDIO_COPY: set the page dirty if VM_WRITE
is not set"), where we need to set the page dirty even if VM_WRITE is not
there: shmem can drop the pte at any time later, and if the pte is not dirty
the data will be dropped with it. For uffd-wp, that could mean data loss if
the dirty bit is not set.
Note that shmem_mfill_zeropage_pte() will always call shmem_mfill_atomic_pte()
with wp_copy==false because UFFDIO_ZEROPAGE does not support
UFFDIO_COPY_MODE_WP.
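For completeness, a sketch of how the monitor might later resolve the write
fault that the uffd-wp bit raises, by clearing the protection with
UFFDIO_WRITEPROTECT (uffd and page carried over from the snippet above; again
illustrative only, error handling omitted):

	/* blocks until a fault is queued on uffd */
	struct uffd_msg msg;
	read(uffd, &msg, sizeof(msg));

	if (msg.event == UFFD_EVENT_PAGEFAULT &&
	    (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP)) {
		unsigned long addr = msg.arg.pagefault.address &
				     ~((unsigned long)page - 1);
		struct uffdio_writeprotect wp = {
			.range = { .start = addr, .len = page },
			/* mode 0 clears wp; the faulting thread is woken */
			.mode  = 0,
		};
		ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
	}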
Signed-off-by: Peter Xu <peterx@...hat.com>
---
include/linux/shmem_fs.h | 5 +++--
mm/shmem.c | 26 +++++++++++++++++++-------
mm/userfaultfd.c | 2 +-
3 files changed, 23 insertions(+), 10 deletions(-)
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index a5a5d1d4d7b1..9d6fc68a1e57 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -123,14 +123,15 @@ extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
- struct page **pagep);
+ struct page **pagep,
+ bool wp_copy);
extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr);
#else
#define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
- src_addr, pagep) ({ BUG(); 0; })
+ src_addr, pagep, wp_copy) ({ BUG(); 0; })
#define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
dst_addr) ({ BUG(); 0; })
#endif
diff --git a/mm/shmem.c b/mm/shmem.c
index 537c137698f8..de45333626f7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2363,7 +2363,8 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
unsigned long dst_addr,
unsigned long src_addr,
bool zeropage,
- struct page **pagep)
+ struct page **pagep,
+ bool wp_copy)
{
struct inode *inode = file_inode(dst_vma->vm_file);
struct shmem_inode_info *info = SHMEM_I(inode);
@@ -2425,9 +2426,18 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
goto out_release;
_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
- if (dst_vma->vm_flags & VM_WRITE)
- _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
- else {
+ if (dst_vma->vm_flags & VM_WRITE) {
+ if (wp_copy)
+ _dst_pte = pte_mkuffd_wp(pte_wrprotect(_dst_pte));
+ else
+ _dst_pte = pte_mkwrite(_dst_pte);
+ /*
+ * Similar reason to set_page_dirty(), that we need to mark the
+ * pte dirty even if wp_copy==true here, otherwise the pte and
+ * its page could be dropped at anytime when e.g. swapped out.
+ */
+ _dst_pte = pte_mkdirty(_dst_pte);
+ } else {
/*
* We don't set the pte dirty if the vma has no
* VM_WRITE permission, so mark the page dirty or it
@@ -2485,10 +2495,12 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
- struct page **pagep)
+ struct page **pagep,
+ bool wp_copy)
{
return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
- dst_addr, src_addr, false, pagep);
+ dst_addr, src_addr, false, pagep,
+ wp_copy);
}
int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
@@ -2499,7 +2511,7 @@ int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
struct page *page = NULL;
return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
- dst_addr, 0, true, &page);
+ dst_addr, 0, true, &page, false);
}
#ifdef CONFIG_TMPFS
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 9a3d451402d7..6d4b3b7c7f9f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -445,7 +445,7 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
if (!zeropage)
err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
dst_vma, dst_addr,
- src_addr, page);
+ src_addr, page, wp_copy);
else
err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
dst_vma, dst_addr);
--
2.26.2