Message-ID: <20170116123310.22697-4-dsafonov@virtuozzo.com>
Date:   Mon, 16 Jan 2017 15:33:08 +0300
From:   Dmitry Safonov <dsafonov@...tuozzo.com>
To:     <linux-kernel@...r.kernel.org>
CC:     <0x7f454c46@...il.com>, Dmitry Safonov <dsafonov@...tuozzo.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>,
        "H. Peter Anvin" <hpa@...or.com>,
        Andy Lutomirski <luto@...nel.org>,
        Borislav Petkov <bp@...e.de>, <x86@...nel.org>,
        <linux-mm@...ck.org>
Subject: [PATCHv2 3/5] x86/mm: fix native mmap() in compat bins and vice-versa

Fix 32-bit compat_sys_mmap() mapping a VMA above 4GB in 64-bit binaries
and 64-bit sys_mmap() mapping a VMA only below 4GB in 32-bit binaries.
Change arch_get_unmapped_area{,_topdown}() to recompute mmap_base for
those cases and to pass the matching high/low limits to
vm_unmapped_area().
Recomputing mmap_base may make compat sys_mmap() in 64-bit binaries a
little slower than the native call, which reuses the mmap_base already
known from exec time. But as that case used to return a buggy address,
it was apparently unused, so no already working ABI gets slower. This
can be optimized later by introducing mmap_compat_{,legacy}_base in
mm_struct.
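
For illustration, a rough sketch of that follow-up (none of it is part
of this patch; the mmap_compat_{,legacy}_base fields and the helper
below are hypothetical): arch_pick_mmap_layout() would precompute both
a native and a compat base at exec time, and the syscall path would
only select one:

static unsigned long pick_legacy_base(struct mm_struct *mm)
{
#ifdef CONFIG_COMPAT
	/* hypothetical field, precomputed at exec time for 32-bit mmap() */
	if (in_compat_syscall())
		return mm->mmap_compat_legacy_base;
#endif
	/* mmap_legacy_base would then always hold the native 64-bit base */
	return mm->mmap_legacy_base;
}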

I discovered this bug while running ZDTM tests for compat 32-bit C/R.
A working compat sys_mmap() in 64-bit binaries is needed there, as
32-bit applications are restored from a 64-bit CRIU binary (see the
user-space sketch after the '---' marker below).

Signed-off-by: Dmitry Safonov <dsafonov@...tuozzo.com>
---
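For reference, a minimal user-space sketch of the compat-mmap()-from-a-
64-bit-binary case mentioned above (not part of the patch, assumes
CONFIG_IA32_EMULATION): syscall number 192 and the ebx/ecx/edx/esi/edi/
ebp argument order are the standard 32-bit x86 mmap2() conventions;
everything else - the helper name, the error check, the final write -
is illustrative only:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define NR_MMAP2_32	192	/* __NR_mmap2 in the 32-bit syscall table */

/* Issue the 32-bit mmap2() via int $0x80 from a 64-bit binary. */
static unsigned int compat_mmap2(unsigned int addr, unsigned int len,
				 unsigned int prot, unsigned int flags,
				 unsigned int fd, unsigned int pgoff)
{
	unsigned long ret = NR_MMAP2_32;
	unsigned long bp = pgoff;

	/* The sixth 32-bit argument is passed in %ebp; swap it in and back. */
	asm volatile ("xchg %%rbp, %[bp]\n\t"
		      "int $0x80\n\t"
		      "xchg %%rbp, %[bp]"
		      : "+a" (ret), [bp] "+r" (bp)
		      : "b" (addr), "c" (len), "d" (prot),
			"S" (flags), "D" (fd)
		      : "memory", "cc");
	return ret;
}

int main(void)
{
	unsigned int addr;

	addr = compat_mmap2(0, 4096, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr >= (unsigned int)-4095) {
		fprintf(stderr, "compat mmap2 failed: %d\n", (int)addr);
		return 1;
	}

	/*
	 * On a buggy kernel the VMA may sit above 4GB, so the truncated
	 * 32-bit address does not point into it and this write faults;
	 * with the fix the whole mapping lies below 4GB.
	 */
	memset((void *)(unsigned long)addr, 0, 4096);
	printf("compat mmap2() returned %#x\n", addr);
	return 0;
}
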
 arch/x86/kernel/sys_x86_64.c | 44 +++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 41 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index a55ed63b9f91..1bf90cd1400c 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -113,10 +113,31 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
 		if (current->flags & PF_RANDOMIZE) {
 			*begin = randomize_page(*begin, 0x02000000);
 		}
+		return;
+	}
+
+	if (!test_thread_flag(TIF_ADDR32)) {
+#ifdef CONFIG_COMPAT
+		/* 64-bit native binary doing compat 32-bit syscall */
+		if (in_compat_syscall()) {
+			*begin = mmap_legacy_base(arch_compat_rnd(),
+						IA32_PAGE_OFFSET);
+			*end = IA32_PAGE_OFFSET;
+			return;
+		}
+#endif
 	} else {
-		*begin = current->mm->mmap_legacy_base;
-		*end = TASK_SIZE;
+		/* 32-bit binary doing 64-bit syscall */
+		if (!in_compat_syscall()) {
+			*begin = mmap_legacy_base(arch_native_rnd(),
+						IA32_PAGE_OFFSET);
+			*end = TASK_SIZE_MAX;
+			return;
+		}
 	}
+
+	*begin = current->mm->mmap_legacy_base;
+	*end = TASK_SIZE;
 }
 
 unsigned long
@@ -157,6 +178,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	return vm_unmapped_area(&info);
 }
 
+static unsigned long find_top(void)
+{
+	if (!test_thread_flag(TIF_ADDR32)) {
+#ifdef CONFIG_COMPAT
+		/* 64-bit native binary doing compat 32-bit syscall */
+		if (in_compat_syscall())
+			return mmap_base(arch_compat_rnd(), IA32_PAGE_OFFSET);
+#endif
+	} else {
+		/* 32-bit binary doing 64-bit syscall */
+		if (!in_compat_syscall())
+			return mmap_base(arch_native_rnd(), TASK_SIZE_MAX);
+	}
+
+	return current->mm->mmap_base;
+}
+
 unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
@@ -190,7 +228,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
-	info.high_limit = mm->mmap_base;
+	info.high_limit = find_top();
 	info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
 	if (filp) {
-- 
2.11.0
