Message-Id: <20220402094550.129-1-lipeifeng@oppo.com>
Date:   Sat,  2 Apr 2022 17:45:50 +0800
From:   lipeifeng@...o.com
To:     akpm@...ux-foundation.org
Cc:     peifeng55@...il.com, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, 21cnbao@...il.com,
        zhangshiming@...o.com, lipeifeng <lipeifeng@...o.com>
Subject: [PATCH] mm: modify the method to search addr in unmapped_area_topdown

From: lipeifeng <lipeifeng@...o.com>

The old method first searches for a gap of len (info->length
+ info->align_mask) bytes and only then rounds the address down
to the desired alignment.

This can fail even though there is enough address space left in
the kernel: e.g. you cannot get a 1M-sized address with a 1M
align_mask although an almost-2M gap ((2M-1) bytes) is still free,
because the search demands the worst-case padded length instead of
checking whether an aligned address actually fits in the gap.

This patch fixes the problem with a new method: search for a gap
of info->length bytes and, at the same time, check whether it can
hold an address at the desired info->align_mask alignment.
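
Not part of the patch, just an illustration: the stand-alone C
sketch below applies both fit rules to a single made-up gap that
is slightly smaller than 2M (align_offset taken as 0). The old
rule rejects the gap; the new rule finds a 1M-aligned 1M block
inside it.

#include <stdio.h>

int main(void)
{
	unsigned long len  = 1UL << 20;		/* 1M request */
	unsigned long mask = (1UL << 20) - 1;	/* 1M alignment */
	unsigned long gap_start = 0x10001000UL;	/* made-up gap ... */
	unsigned long gap_end   = 0x10200000UL;	/* ... of 2M - 4K  */
	unsigned long addr;

	/* Old rule: the gap must hold the worst-case padded length. */
	if (gap_end - gap_start >= len + mask)
		printf("old rule: fits\n");
	else
		printf("old rule: no fit (wants 0x%lx, gap is 0x%lx)\n",
		       len + mask, gap_end - gap_start);

	/* New rule: take the highest aligned address and check that
	 * the block starting there still lies inside the gap. */
	addr = gap_end - len;
	addr -= addr & mask;
	if (addr >= gap_start)
		printf("new rule: fits at 0x%lx\n", addr);
	return 0;
}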

Do a simple test with TIF_32BIT:
- malloc (size:1M align:2M) repeatedly until allocation fails;
- then malloc (size:1M align:1M) repeatedly and account how much
space can still be allocated successfully.

Before optimization: allocated 0     bytes.
After  optimization: allocated 1.9G+ bytes.
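
Purely illustrative, not from the patch or a real kernel: the
rough user-space model below replays the two phases against a
flat address space (SPACE and LOW are made-up stand-ins for the
32-bit task size and mmap_min_addr, align_offset taken as 0) and
reproduces the shape of these numbers: the old rule ends phase 2
with 0 bytes, the new rule with roughly 2G.

#include <stdio.h>
#include <stdlib.h>

#define SPACE	0xFFFFF000UL	/* pretend end of a 32-bit task's space */
#define LOW	0x10000UL	/* pretend mmap_min_addr */
#define MB	(1UL << 20)

struct block { unsigned long start, end; struct block *next; };
static struct block *blocks;	/* allocated blocks, descending by start */

/* Top-down search for the highest 'len' block at 'mask' alignment,
 * using either the old padded-length rule or the new per-gap check. */
static unsigned long find_topdown(unsigned long len, unsigned long mask,
				  int old_rule)
{
	unsigned long gap_end = SPACE;
	struct block *b = blocks;

	for (;;) {
		unsigned long gap_start = b ? b->end : LOW;
		unsigned long addr = gap_end - len;

		addr -= addr & mask;
		if (old_rule) {
			/* gap must hold the worst-case padded length */
			if (gap_end - gap_start >= len + mask)
				return addr;
		} else {
			/* check whether the aligned block really fits */
			if (gap_end - gap_start >= len && addr >= gap_start)
				return addr;
		}
		if (!b)
			return -1UL;
		gap_end = b->start;
		b = b->next;
	}
}

/* Keep the block list sorted by start address, descending. */
static void insert(unsigned long start, unsigned long len)
{
	struct block *nb = malloc(sizeof(*nb)), **p = &blocks;

	nb->start = start;
	nb->end = start + len;
	while (*p && (*p)->start > start)
		p = &(*p)->next;
	nb->next = *p;
	*p = nb;
}

int main(void)
{
	int old_rule;

	for (old_rule = 1; old_rule >= 0; old_rule--) {
		unsigned long addr, got = 0;

		blocks = NULL;	/* leaks the previous run; fine for a demo */
		/* phase 1: 1M blocks at 2M alignment until failure */
		while ((addr = find_topdown(MB, 2 * MB - 1, old_rule)) != -1UL)
			insert(addr, MB);
		/* phase 2: 1M blocks at 1M alignment, count what fits */
		while ((addr = find_topdown(MB, MB - 1, old_rule)) != -1UL) {
			insert(addr, MB);
			got += MB;
		}
		printf("%s rule: phase 2 allocated %luM\n",
		       old_rule ? "old" : "new", got >> 20);
	}
	return 0;
}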

Signed-off-by: lipeifeng <lipeifeng@...o.com>
---
 mm/mmap.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/mm/mmap.c b/mm/mmap.c
index f61a154..30e33d3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2002,13 +2002,14 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long length, low_limit, high_limit, gap_start, gap_end;
+	unsigned long length, low_limit, high_limit, gap_start, gap_end, gap_end_tmp;
 
 	/* Adjust search length to account for worst case alignment overhead */
 	length = info->length + info->align_mask;
 	if (length < info->length)
 		return -ENOMEM;
 
+	length = info->length;
 	/*
 	 * Adjust search limits by the desired length.
 	 * See implementation comment at top of unmapped_area().
@@ -2024,8 +2025,12 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
 	/* Check highest gap, which does not precede any rbtree node */
 	gap_start = mm->highest_vm_end;
-	if (gap_start <= high_limit)
-		goto found_highest;
+	if (gap_start <= high_limit) {
+		gap_end_tmp = gap_end - info->length;
+		gap_end_tmp -= (gap_end_tmp - info->align_offset) & info->align_mask;
+		if (gap_end_tmp >= gap_start)
+			goto found_highest;
+	}
 
 	/* Check if rbtree root looks promising */
 	if (RB_EMPTY_ROOT(&mm->mm_rb))
@@ -2053,8 +2058,13 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 		if (gap_end < low_limit)
 			return -ENOMEM;
 		if (gap_start <= high_limit &&
-		    gap_end > gap_start && gap_end - gap_start >= length)
-			goto found;
+		    gap_end > gap_start && gap_end - gap_start >= length) {
+			gap_end_tmp = gap_end - info->length;
+			gap_end_tmp -= (gap_end_tmp - info->align_offset) & info->align_mask;
+			if (gap_end_tmp >= gap_start)
+				goto found;
+
+		}
 
 		/* Visit left subtree if it looks promising */
 		if (vma->vm_rb.rb_left) {
-- 
2.7.4
