Message-ID: <20231017090815.1067790-6-jeffxu@chromium.org>
Date: Tue, 17 Oct 2023 09:08:12 +0000
From: jeffxu@...omium.org
To: akpm@...ux-foundation.org,
	keescook@...omium.org,
	jannh@...gle.com,
	sroettger@...gle.com,
	willy@...radead.org,
	gregkh@...uxfoundation.org,
	torvalds@...ux-foundation.org
Cc: jeffxu@...gle.com,
	jorgelo@...omium.org,
	groeck@...omium.org,
	linux-kernel@...r.kernel.org,
	linux-kselftest@...r.kernel.org,
	linux-mm@...ck.org,
	surenb@...gle.com,
	alex.sierra@....com,
	apopple@...dia.com,
	aneesh.kumar@...ux.ibm.com,
	axelrasmussen@...gle.com,
	ben@...adent.org.uk,
	catalin.marinas@....com,
	david@...hat.com,
	dwmw@...zon.co.uk,
	ying.huang@...el.com,
	hughd@...gle.com,
	joey.gouly@....com,
	corbet@....net,
	wangkefeng.wang@...wei.com,
	Liam.Howlett@...cle.com,
	lstoakes@...il.com,
	mawupeng1@...wei.com,
	linmiaohe@...wei.com,
	namit@...are.com,
	peterx@...hat.com,
	peterz@...radead.org,
	ryan.roberts@....com,
	shr@...kernel.io,
	vbabka@...e.cz,
	xiujianfeng@...wei.com,
	yu.ma@...el.com,
	zhangpeng362@...wei.com,
	dave.hansen@...el.com,
	luto@...nel.org,
	linux-hardening@...r.kernel.org
Subject: [RFC PATCH v2 5/8] mseal: Check seal flag for munmap(2)

From: Jeff Xu <jeffxu@...gle.com>

munmap(2) unmaps VMAs in the given address range.
Sealing prevents an unintended munmap(2) call from succeeding.

What this patch does:
When munmap(2) is invoked, if any VMA in the given range has
MM_SEAL_MUNMAP set by a previous mseal(2) call, the munmap(2)
call fails and no VMA is modified.
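
For illustration only (not part of this patch), a minimal userspace
sketch of the intended behavior. The mseal(2) wrapper, its exact
signature and the MM_SEAL_MUNMAP flag come from earlier patches in
this series and are assumed here:

	#include <sys/mman.h>
	#include <errno.h>

	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* Seal against unmapping; wrapper signature assumed. */
	mseal(p, 4096, MM_SEAL_MUNMAP, 0);

	/* Rejected before any VMA is touched: returns -1 with
	 * errno == EACCES, and the mapping stays intact. */
	munmap(p, 4096);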

This patch works as follows:
1. At the syscall entry point, SYSCALL_DEFINE2(munmap, ...),
pass checkSeals = MM_SEAL_MUNMAP into __vm_munmap(), which in
turn passes it to do_vmi_munmap().

Of all the call paths into do_vmi_munmap(), this is the only
one that passes checkSeals = MM_SEAL_MUNMAP; every other caller
passes checkSeals = 0.

2. do_vmi_munmap() calls can_modify_mm() before any update is
made to the VMAs (see the sketch after this list).
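
For context, a rough sketch of the kind of check can_modify_mm()
performs. The real helper is introduced earlier in this series; the
VMA field holding the seal bits is an assumption here, not the
actual implementation:

	static bool can_modify_mm(struct mm_struct *mm, unsigned long start,
				  unsigned long end, unsigned long checkSeals)
	{
		struct vm_area_struct *vma;
		VMA_ITERATOR(vmi, mm, start);

		if (!checkSeals)
			return true;

		/* Refuse if any VMA in [start, end) carries a requested seal. */
		for_each_vma_range(vmi, vma, end) {
			if (vma->vm_seals & checkSeals)	/* field name assumed */
				return false;
		}

		return true;
	}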

Signed-off-by: Jeff Xu <jeffxu@...gle.com>
---
 include/linux/mm.h |  2 +-
 mm/mmap.c          | 21 +++++++++++++--------
 mm/mremap.c        |  5 +++--
 3 files changed, 17 insertions(+), 11 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index b09df8501987..f2f316522f2a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3279,7 +3279,7 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
 	unsigned long pgoff, unsigned long *populate, struct list_head *uf);
 extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 			 unsigned long start, size_t len, struct list_head *uf,
-			 bool unlock);
+			 bool unlock, unsigned long checkSeals);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
 		     struct list_head *uf);
 extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
diff --git a/mm/mmap.c b/mm/mmap.c
index 414ac31aa9fa..62d592f16f45 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2601,6 +2601,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
  * @len: The length of the range to munmap
  * @uf: The userfaultfd list_head
  * @unlock: set to true if the user wants to drop the mmap_lock on success
+ * @checkSeals: seal type to check.
  *
  * This function takes a @mas that is either pointing to the previous VMA or set
  * to MA_START and sets it up to remove the mapping(s).  The @len will be
@@ -2611,7 +2612,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
  */
 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 		  unsigned long start, size_t len, struct list_head *uf,
-		  bool unlock)
+		  bool unlock, unsigned long checkSeals)
 {
 	unsigned long end;
 	struct vm_area_struct *vma;
@@ -2623,6 +2624,9 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 	if (end == start)
 		return -EINVAL;
 
+	if (!can_modify_mm(mm, start, end, checkSeals))
+		return -EACCES;
+
 	 /* arch_unmap() might do unmaps itself.  */
 	arch_unmap(mm, start, end);
 
@@ -2650,7 +2654,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 {
 	VMA_ITERATOR(vmi, mm, start);
 
-	return do_vmi_munmap(&vmi, mm, start, len, uf, false);
+	return do_vmi_munmap(&vmi, mm, start, len, uf, false, 0);
 }
 
 unsigned long mmap_region(struct file *file, unsigned long addr,
@@ -2684,7 +2688,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	}
 
 	/* Unmap any existing mapping in the area */
-	if (do_vmi_munmap(&vmi, mm, addr, len, uf, false))
+	if (do_vmi_munmap(&vmi, mm, addr, len, uf, false, 0))
 		return -ENOMEM;
 
 	/*
@@ -2909,7 +2913,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	return error;
 }
 
-static int __vm_munmap(unsigned long start, size_t len, bool unlock)
+static int __vm_munmap(unsigned long start, size_t len, bool unlock,
+			unsigned long checkSeals)
 {
 	int ret;
 	struct mm_struct *mm = current->mm;
@@ -2919,7 +2924,7 @@ static int __vm_munmap(unsigned long start, size_t len, bool unlock)
 	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
-	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
+	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock, checkSeals);
 	if (ret || !unlock)
 		mmap_write_unlock(mm);
 
@@ -2929,14 +2934,14 @@ static int __vm_munmap(unsigned long start, size_t len, bool unlock)
 
 int vm_munmap(unsigned long start, size_t len)
 {
-	return __vm_munmap(start, len, false);
+	return __vm_munmap(start, len, false, 0);
 }
 EXPORT_SYMBOL(vm_munmap);
 
 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
 {
 	addr = untagged_addr(addr);
-	return __vm_munmap(addr, len, true);
+	return __vm_munmap(addr, len, true, MM_SEAL_MUNMAP);
 }
 
 
@@ -3168,7 +3173,7 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
 	if (ret)
 		goto limits_failed;
 
-	ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
+	ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0, 0);
 	if (ret)
 		goto munmap_failed;
 
diff --git a/mm/mremap.c b/mm/mremap.c
index 056478c106ee..ac363937f8c4 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -715,7 +715,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	}
 
 	vma_iter_init(&vmi, mm, old_addr);
-	if (!do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false)) {
+	if (!do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false,
+				0)) {
 		/* OOM: unable to split vma, just get accounts right */
 		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
 			vm_acct_memory(old_len >> PAGE_SHIFT);
@@ -1009,7 +1010,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 		}
 
 		ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len,
-				    &uf_unmap, true);
+				    &uf_unmap, true, 0);
 		if (ret)
 			goto out;
 
-- 
2.42.0.655.g421f12c284-goog

