Message-Id: <20210323004912.35132-2-peterx@redhat.com>
Date: Mon, 22 Mar 2021 20:48:50 -0400
From: Peter Xu <peterx@...hat.com>
To: linux-kernel@...r.kernel.org, linux-mm@...ck.org
Cc: "Kirill A . Shutemov" <kirill@...temov.name>,
Jerome Glisse <jglisse@...hat.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Matthew Wilcox <willy@...radead.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Hugh Dickins <hughd@...gle.com>, peterx@...hat.com,
Nadav Amit <nadav.amit@...il.com>,
Andrea Arcangeli <aarcange@...hat.com>,
Mike Rapoport <rppt@...ux.vnet.ibm.com>
Subject: [PATCH 01/23] shmem/userfaultfd: Take care of UFFDIO_COPY_MODE_WP

Firstly, pass wp_copy into shmem_mcopy_atomic_pte() through the stack, then
apply the UFFD_WP bit properly when the UFFDIO_COPY on shmem is requested
with UFFDIO_COPY_MODE_WP.  (A minimal userspace sketch of the resulting
flow is appended after the "---" line below.)

One thing to mention is that shmem_mcopy_atomic_pte() needs to set the
dirty bit in the pte even if UFFDIO_COPY_MODE_WP is set.  The reason is
similar to commit dcf7fe9d8976 ("userfaultfd: shmem: UFFDIO_COPY: set the
page dirty if VM_WRITE is not set"), where we need to set the page dirty
even if VM_WRITE is not set.  It's just that shmem can drop the pte at any
time later, and if the pte is not dirty the data will be dropped.  For
uffd-wp, that could lead to data loss if the dirty bit is not set.
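
Concretely, the VM_WRITE path in shmem_mcopy_atomic_pte() ends up building
the pte roughly as follows (a condensed view of the mm/shmem.c hunk below,
not new logic):

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	if (dst_vma->vm_flags & VM_WRITE) {
		if (wp_copy)
			/* Write-protect the pte, and mark it uffd-wp tracked */
			_dst_pte = pte_mkuffd_wp(pte_wrprotect(_dst_pte));
		else
			_dst_pte = pte_mkwrite(_dst_pte);
		/* Always dirty; a clean shmem pte could be dropped (e.g. on
		 * swapout) and its data lost */
		_dst_pte = pte_mkdirty(_dst_pte);
	}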

Note that the UFFDIO_ZEROPAGE path can still reach shmem_mcopy_atomic_pte(),
but always with wp_copy==false, because UFFDIO_ZEROPAGE does not support
UFFDIO_COPY_MODE_WP.

Signed-off-by: Peter Xu <peterx@...hat.com>
---
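
For reference only (not part of the patch): a minimal userspace sketch of
the flow this enables.  Caveats: registering a shmem mapping with
UFFDIO_REGISTER_MODE_WP is only wired up by later patches in this series,
and all error handling is omitted for brevity.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
	size_t len = 4096;
	/* MAP_SHARED|MAP_ANONYMOUS yields a shmem-backed mapping */
	char *dst = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	char *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_PAGEFAULT_FLAG_WP,
	};
	ioctl(uffd, UFFDIO_API, &api);

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)dst, .len = len },
		.mode = UFFDIO_REGISTER_MODE_MISSING |
			UFFDIO_REGISTER_MODE_WP,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	memset(src, 1, len);

	/*
	 * Resolve a missing fault and write-protect the new pte in one
	 * shot; a later write to "dst" will then raise a uffd-wp fault
	 * instead of completing silently.
	 */
	struct uffdio_copy copy = {
		.dst = (unsigned long)dst,
		.src = (unsigned long)src,
		.len = len,
		.mode = UFFDIO_COPY_MODE_WP,
	};
	ioctl(uffd, UFFDIO_COPY, &copy);

	return 0;
}
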
 include/linux/shmem_fs.h |  5 +++--
 mm/shmem.c               | 18 ++++++++++++++----
 mm/userfaultfd.c         |  2 +-
 3 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index f0919c3722e7..dfd0369657d8 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -128,10 +128,11 @@ extern void shmem_uncharge(struct inode *inode, long pages);
 int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
                           struct vm_area_struct *dst_vma,
                           unsigned long dst_addr, unsigned long src_addr,
-                          enum mcopy_atomic_mode mode, struct page **pagep);
+                          enum mcopy_atomic_mode mode, struct page **pagep,
+                          bool wp_copy);
 #else /* !CONFIG_SHMEM */
 #define shmem_mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, \
-                               src_addr, mode, pagep) ({ BUG(); 0; })
+                               src_addr, mode, pagep, wp_copy) ({ BUG(); 0; })
 #endif /* CONFIG_SHMEM */
 #endif /* CONFIG_USERFAULTFD */

diff --git a/mm/shmem.c b/mm/shmem.c
index 5cfd2fb6e52b..e88aaabaeb27 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2364,7 +2364,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
                           struct vm_area_struct *dst_vma,
                           unsigned long dst_addr, unsigned long src_addr,
-                          enum mcopy_atomic_mode mode, struct page **pagep)
+                          enum mcopy_atomic_mode mode, struct page **pagep,
+                          bool wp_copy)
 {
        bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
        struct inode *inode = file_inode(dst_vma->vm_file);
@@ -2438,9 +2439,18 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
        }

        _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
-       if (dst_vma->vm_flags & VM_WRITE)
-               _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
-       else {
+       if (dst_vma->vm_flags & VM_WRITE) {
+               if (wp_copy)
+                       _dst_pte = pte_mkuffd_wp(pte_wrprotect(_dst_pte));
+               else
+                       _dst_pte = pte_mkwrite(_dst_pte);
+               /*
+                * Similar reason to set_page_dirty(): we must mark the pte
+                * dirty even if wp_copy==true here, otherwise the pte and its
+                * page could be dropped at any time when e.g. swapped out.
+                */
+               _dst_pte = pte_mkdirty(_dst_pte);
+       } else {
                /*
                 * We don't set the pte dirty if the vma has no
                 * VM_WRITE permission, so mark the page dirty or it

diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index cbb7c8d79a4d..0963e0d9ed20 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -448,7 +448,7 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
        } else {
                VM_WARN_ON_ONCE(wp_copy);
                err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
-                                            src_addr, mode, page);
+                                            src_addr, mode, page, wp_copy);
        }

        return err;

--
2.26.2