Message-ID: <20220617134637.1771711-1-Liam.Howlett@oracle.com>
Date:   Fri, 17 Jun 2022 13:46:42 +0000
From:   Liam Howlett <liam.howlett@...cle.com>
To:     "maple-tree@...ts.infradead.org" <maple-tree@...ts.infradead.org>,
        "linux-mm@...ck.org" <linux-mm@...ck.org>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Qian Cai <quic_qiancai@...cinc.com>
CC:     Yu Zhao <yuzhao@...gle.com>
Subject: [PATCH] mm/mmap: Change do_mas_align_munmap() to avoid preallocations
 for sidetree

Recording the VMAs to be removed in the sidetree does not require a
preallocation: __split_vma() already allocates with GFP_KERNEL, so the
sidetree writes can do the same.  Changing to a regular maple tree
write avoids preallocation-sizing problems when a large number of VMAs
are being unmapped.  Using mas_store_gfp() instead of preallocations
also means that the maple state does not need to be destroyed to free
unused nodes.  At the same time, switch the tree flags to just
MT_FLAGS_LOCK_EXTERN since gaps do not need to be tracked in the
sidetree; dropping gap tracking allows more VMAs per node.
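
Condensed from the hunks below, the sidetree write pattern changes
from "preallocate, then store" to a single allocating store (a sketch
only; the surrounding loop and error labels are elided):

	/* Before: preallocate up front; the store itself cannot fail. */
	if (mas_preallocate(&mas_detach, vma, GFP_KERNEL))
		return -ENOMEM;
	mas_set_range(&mas_detach, vma->vm_start, vma->vm_end - 1);
	mas_store(&mas_detach, vma);
	/* ... one store per VMA to be removed ... */
	mas_destroy(&mas_detach);	/* free unused preallocated nodes */

	/* After: allocate at store time; the store can return -ENOMEM. */
	mas_set_range(&mas_detach, vma->vm_start, vma->vm_end - 1);
	if (mas_store_gfp(&mas_detach, vma, GFP_KERNEL))
		return -ENOMEM;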

Also reorganize the goto labels, splitting them up so that each
failure path unwinds only the state that has been set up by the time
it is reached.
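
With the split labels, the resulting error ladder (from the last hunk
below) tears down exactly what exists at each failure point:

	userfaultfd_error:
	munmap_sidetree_failed:
	end_split_failed:
		__mt_destroy(&mt_detach);	/* sidetree may hold stored VMAs */
	start_split_failed:
	map_count_exceeded:
		mas_destroy(mas);	/* drop the main-tree preallocation */
		return error;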

Fixes: e34b4addc263 ("mm/mmap: fix potential leak on do_mas_align_munmap()")
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
---
 mm/mmap.c | 39 +++++++++++++++++++++------------------
 1 file changed, 21 insertions(+), 18 deletions(-)

diff --git a/mm/mmap.c b/mm/mmap.c
index 315c3ca118cb..58efd5723df7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2335,13 +2335,17 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	return __split_vma(mm, vma, addr, new_below);
 }
 
-static inline void munmap_sidetree(struct vm_area_struct *vma,
+static inline int munmap_sidetree(struct vm_area_struct *vma,
 				   struct ma_state *mas_detach)
 {
 	mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1);
-	mas_store(mas_detach, vma);
+	if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
+		return -ENOMEM;
+
 	if (vma->vm_flags & VM_LOCKED)
 		vma->vm_mm->locked_vm -= vma_pages(vma);
+
+	return 0;
 }
 
 /*
@@ -2365,16 +2369,13 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	struct maple_tree mt_detach;
 	int count = 0;
 	int error = -ENOMEM;
-	MA_STATE(mas_detach, &mt_detach, start, end - 1);
-	mt_init_flags(&mt_detach, MM_MT_FLAGS);
+	MA_STATE(mas_detach, &mt_detach, 0, 0);
+	mt_init_flags(&mt_detach, MT_FLAGS_LOCK_EXTERN);
 	mt_set_external_lock(&mt_detach, &mm->mmap_lock);
 
 	if (mas_preallocate(mas, vma, GFP_KERNEL))
 		return -ENOMEM;
 
-	if (mas_preallocate(&mas_detach, vma, GFP_KERNEL))
-		goto detach_alloc_fail;
-
 	mas->last = end - 1;
 	/*
 	 * If we need to split any vma, do it now to save pain later.
@@ -2401,7 +2402,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		 */
 		error = __split_vma(mm, vma, start, 0);
 		if (error)
-			goto split_failed;
+			goto start_split_failed;
 
 		mas_set(mas, start);
 		vma = mas_walk(mas);
@@ -2422,26 +2423,28 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 
 			error = __split_vma(mm, next, end, 1);
 			if (error)
-				goto split_failed;
+				goto end_split_failed;
 
 			mas_set(mas, end);
 			split = mas_prev(mas, 0);
-			munmap_sidetree(split, &mas_detach);
+			if (munmap_sidetree(split, &mas_detach))
+				goto munmap_sidetree_failed;
+
 			count++;
 			if (vma == next)
 				vma = split;
 			break;
 		}
+		if (munmap_sidetree(next, &mas_detach))
+			goto munmap_sidetree_failed;
+
 		count++;
-		munmap_sidetree(next, &mas_detach);
 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
 		BUG_ON(next->vm_start < start);
 		BUG_ON(next->vm_start > end);
 #endif
 	}
 
-	mas_destroy(&mas_detach);
-
 	if (!next)
 		next = mas_next(mas, ULONG_MAX);
 
@@ -2502,18 +2505,18 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	/* Statistics and freeing VMAs */
 	mas_set(&mas_detach, start);
 	remove_mt(mm, &mas_detach);
-	validate_mm(mm);
 	__mt_destroy(&mt_detach);
 
 
 	validate_mm(mm);
 	return downgrade ? 1 : 0;
 
-map_count_exceeded:
-split_failed:
 userfaultfd_error:
-	mas_destroy(&mas_detach);
-detach_alloc_fail:
+munmap_sidetree_failed:
+end_split_failed:
+	__mt_destroy(&mt_detach);
+start_split_failed:
+map_count_exceeded:
 	mas_destroy(mas);
 	return error;
 }
-- 
2.35.1
