Message-Id: <20210427161317.50682-2-peterx@redhat.com>
Date:   Tue, 27 Apr 2021 12:12:54 -0400
From:   Peter Xu <peterx@...hat.com>
To:     linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc:     Nadav Amit <nadav.amit@...il.com>,
        Miaohe Lin <linmiaohe@...wei.com>,
        Mike Rapoport <rppt@...ux.vnet.ibm.com>,
        Andrea Arcangeli <aarcange@...hat.com>,
        Hugh Dickins <hughd@...gle.com>, peterx@...hat.com,
        Jerome Glisse <jglisse@...hat.com>,
        Mike Kravetz <mike.kravetz@...cle.com>,
        Jason Gunthorpe <jgg@...pe.ca>,
        Matthew Wilcox <willy@...radead.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Axel Rasmussen <axelrasmussen@...gle.com>,
        "Kirill A . Shutemov" <kirill@...temov.name>
Subject: [PATCH v2 01/24] shmem/userfaultfd: Take care of UFFDIO_COPY_MODE_WP

First, pass wp_copy into shmem_mfill_atomic_pte() through the stack.
Then apply the UFFD_WP bit properly when the UFFDIO_COPY on shmem is
requested with UFFDIO_COPY_MODE_WP.
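
For reference, a userspace caller would request this behaviour roughly as in
the sketch below (illustrative only, not part of this patch; uffd_copy_wp()
is a hypothetical helper, and the shmem range is assumed to be registered
with UFFDIO_REGISTER_MODE_MISSING | UFFDIO_REGISTER_MODE_WP):

  #include <linux/userfaultfd.h>
  #include <stddef.h>
  #include <sys/ioctl.h>

  /* Hypothetical helper: copy a page into the faulting range and keep it
   * write-protected so a later write triggers a uffd-wp event. */
  static int uffd_copy_wp(int uffd, void *dst, void *src, size_t len)
  {
          struct uffdio_copy copy = {
                  .dst  = (unsigned long)dst,
                  .src  = (unsigned long)src,
                  .len  = len,
                  /* Ask the kernel to install the pte write-protected. */
                  .mode = UFFDIO_COPY_MODE_WP,
          };

          return ioctl(uffd, UFFDIO_COPY, &copy);
  }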

One thing to mention is that shmem_mfill_atomic_pte() needs to set the dirty
bit in the pte even if UFFDIO_COPY_MODE_WP is set.  The reason is similar to
dcf7fe9d8976 ("userfaultfd: shmem: UFFDIO_COPY: set the page dirty if VM_WRITE
is not set"), where the page needs to be marked dirty even when VM_WRITE is
not set: shmem can drop the pte at any time later, and if the pte is not dirty
the data will be dropped.  For uffd-wp, that would mean data loss without the
dirty bit set.

Note that shmem_mfill_zeropage_pte() always calls shmem_mfill_atomic_pte()
with wp_copy==false, because UFFDIO_ZEROPAGE does not support
UFFDIO_COPY_MODE_WP.
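
As a rough illustration (not part of this patch; uffd_zeropage() is a
hypothetical helper), the UFFDIO_ZEROPAGE ABI only defines
UFFDIO_ZEROPAGE_MODE_DONTWAKE, so userspace has no way to request write
protection on that path:

  #include <linux/userfaultfd.h>
  #include <stddef.h>
  #include <sys/ioctl.h>

  /* Hypothetical helper: resolve a missing fault with the zero page.
   * There is no UFFDIO_ZEROPAGE_MODE_WP, hence wp_copy stays false. */
  static int uffd_zeropage(int uffd, void *dst, size_t len)
  {
          struct uffdio_zeropage zp = {
                  .range = {
                          .start = (unsigned long)dst,
                          .len   = len,
                  },
                  .mode  = 0,
          };

          return ioctl(uffd, UFFDIO_ZEROPAGE, &zp);
  }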

Signed-off-by: Peter Xu <peterx@...hat.com>
---
 include/linux/shmem_fs.h |  5 +++--
 mm/shmem.c               | 27 ++++++++++++++++++++-------
 mm/userfaultfd.c         |  2 +-
 3 files changed, 24 insertions(+), 10 deletions(-)

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index d82b6f3965885..a21eb25183d02 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -127,14 +127,15 @@ extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 				  struct vm_area_struct *dst_vma,
 				  unsigned long dst_addr,
 				  unsigned long src_addr,
-				  struct page **pagep);
+				  struct page **pagep,
+				  bool wp_copy);
 extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
 				    pmd_t *dst_pmd,
 				    struct vm_area_struct *dst_vma,
 				    unsigned long dst_addr);
 #else
 #define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
-			       src_addr, pagep)        ({ BUG(); 0; })
+			       src_addr, pagep, wp_copy)    ({ BUG(); 0; })
 #define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
 				 dst_addr)      ({ BUG(); 0; })
 #endif
diff --git a/mm/shmem.c b/mm/shmem.c
index 26c76b13ad233..8fbf7680f044c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2360,7 +2360,8 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 				  unsigned long dst_addr,
 				  unsigned long src_addr,
 				  bool zeropage,
-				  struct page **pagep)
+				  struct page **pagep,
+				  bool wp_copy)
 {
 	struct inode *inode = file_inode(dst_vma->vm_file);
 	struct shmem_inode_info *info = SHMEM_I(inode);
@@ -2422,9 +2423,18 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 		goto out_release;
 
 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
-	if (dst_vma->vm_flags & VM_WRITE)
-		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
-	else {
+	if (dst_vma->vm_flags & VM_WRITE) {
+		if (wp_copy)
+			_dst_pte = pte_mkuffd_wp(pte_wrprotect(_dst_pte));
+		else
+			_dst_pte = pte_mkwrite(_dst_pte);
+		/*
+		 * Similar reason to set_page_dirty(), that we need to mark the
+		 * pte dirty even if wp_copy==true here, otherwise the pte and
+		 * its page could be dropped at anytime when e.g. swapped out.
+		 */
+		_dst_pte = pte_mkdirty(_dst_pte);
+	} else {
 		/*
 		 * We don't set the pte dirty if the vma has no
 		 * VM_WRITE permission, so mark the page dirty or it
@@ -2482,10 +2492,12 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
 			   struct vm_area_struct *dst_vma,
 			   unsigned long dst_addr,
 			   unsigned long src_addr,
-			   struct page **pagep)
+			   struct page **pagep,
+			   bool wp_copy)
 {
 	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
-				      dst_addr, src_addr, false, pagep);
+				      dst_addr, src_addr, false, pagep,
+				      wp_copy);
 }
 
 int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
@@ -2496,7 +2508,8 @@ int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
 	struct page *page = NULL;
 
 	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
-				      dst_addr, 0, true, &page);
+				      dst_addr, 0, true, &page,
+				      false);
 }
 
 #ifdef CONFIG_TMPFS
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index e14b3820c6a81..7adaebe222b8e 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -443,7 +443,7 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
 		if (!zeropage)
 			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
 						     dst_vma, dst_addr,
-						     src_addr, page);
+						     src_addr, page, wp_copy);
 		else
 			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
 						       dst_vma, dst_addr);
-- 
2.26.2
