Date:   Mon, 11 Oct 2021 22:31:48 +0800
From:   sxwjean@...com
To:     linux-mm@...ck.org, x86@...nel.org
Cc:     Xiongwei Song <sxwjean@...il.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
        Kees Cook <keescook@...omium.org>,
        "H. Peter Anvin" <hpa@...or.com>,
        Dave Hansen <dave.hansen@...ux.intel.com>,
        Andy Lutomirski <luto@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Arnd Bergmann <arnd@...db.de>,
        Al Viro <viro@...iv.linux.org.uk>,
        Gabriel Krisman Bertazi <krisman@...labora.com>,
        Lai Jiangshan <laijs@...ux.alibaba.com>,
        Huang Rui <ray.huang@....com>,
        Yazen Ghannam <yazen.ghannam@....com>,
        Kim Phillips <kim.phillips@....com>,
        Oleg Nesterov <oleg@...hat.com>,
        Balbir Singh <sblbir@...zon.com>,
        "David S. Miller" <davem@...emloft.net>, sxwjean@...com,
        linux-kernel@...r.kernel.org
Subject: [PATCH v2 4/6] x86/mm: Randomize VA with generic arch_pick_mmap_layout()

From: Xiongwei Song <sxwjean@...il.com>

The logic of x86's arch_pick_mmap_layout() is basically the same as
that of the generic arch_pick_mmap_layout() in mm/util.c. Let's enable
ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT for x86 so that it uses the
generic arch_pick_mmap_layout(), and delete x86's own
arch_pick_mmap_layout() together with its helper functions.
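
For reference, the generic top-down variant in mm/util.c that
ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT selects is roughly the following
(sketch for context, not part of this diff):

	void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
	{
		unsigned long random_factor = 0UL;

		if (current->flags & PF_RANDOMIZE)
			random_factor = arch_mmap_rnd();

		if (mmap_is_legacy(rlim_stack)) {
			/* Legacy bottom-up layout. */
			mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
			mm->get_unmapped_area = arch_get_unmapped_area;
		} else {
			/* Default top-down layout below the stack gap. */
			mm->mmap_base = mmap_base(random_factor, rlim_stack);
			mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		}
	}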

To verify that mmap entropy is preserved after this patch, I ran a
basic test with the following C code:
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>

	int main(int argc, char *argv[])
	{
		unsigned long *addr;

		addr = mmap(NULL, 4096, PROT_READ, MAP_SHARED|MAP_ANONYMOUS, -1, 0);
		if (addr == MAP_FAILED) {
			printf("NULL\n");
		} else {
			printf("%lx\n", (unsigned long)addr);
			munmap(addr, 4096);
		}

		return 0;
	}

I ran the program above 10,000 times to collect the mmap addresses;
the results are below.

Before this patch:
	    Virtual Address Range       | hit times
	----------------------------------------
	0x7f0000000000 - 0x7f0ffffff000 |   655
	0x7f1000000000 - 0x7f1ffffff000 |   617
	0x7f2000000000 - 0x7f2ffffff000 |   636
	0x7f3000000000 - 0x7f3ffffff000 |   625
	0x7f4000000000 - 0x7f4ffffff000 |   651
	0x7f5000000000 - 0x7f5ffffff000 |   591
	0x7f6000000000 - 0x7f6ffffff000 |   623
	0x7f7000000000 - 0x7f7ffffff000 |   627
	0x7f8000000000 - 0x7f8ffffff000 |   638
	0x7f9000000000 - 0x7f9ffffff000 |   586
	0x7fa000000000 - 0x7faffffff000 |   637
	0x7fb000000000 - 0x7fbffffff000 |   607
	0x7fc000000000 - 0x7fcffffff000 |   618
	0x7fd000000000 - 0x7fdffffff000 |   656
	0x7fe000000000 - 0x7feffffff000 |   614
	0x7ff000000000 - 0x7ffffffff000 |   619

After this patch:
	    Virtual Address Range       | hit times
	----------------------------------------
	0x7f0000000000 - 0x7f0ffffff000 |   661
	0x7f1000000000 - 0x7f1ffffff000 |   645
	0x7f2000000000 - 0x7f2ffffff000 |   609
	0x7f3000000000 - 0x7f3ffffff000 |   594
	0x7f4000000000 - 0x7f4ffffff000 |   616
	0x7f5000000000 - 0x7f5ffffff000 |   622
	0x7f6000000000 - 0x7f6ffffff000 |   617
	0x7f7000000000 - 0x7f7ffffff000 |   582
	0x7f8000000000 - 0x7f8ffffff000 |   618
	0x7f9000000000 - 0x7f9ffffff000 |   629
	0x7fa000000000 - 0x7faffffff000 |   635
	0x7fb000000000 - 0x7fbffffff000 |   625
	0x7fc000000000 - 0x7fcffffff000 |   614
	0x7fd000000000 - 0x7fdffffff000 |   610
	0x7fe000000000 - 0x7feffffff000 |   648
	0x7ff000000000 - 0x7ffffffff000 |   675

The distribution after the patch is comparable to the one before it,
so mmap entropy looks preserved.
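
For reference, the counts above can be collected with a small driver
that re-runs the test program; a fresh execve() is needed per sample
because mmap_base is chosen once per exec. A rough sketch, assuming the
test program above is compiled as ./mmap_test (hypothetical name):

	#include <stdio.h>

	#define SAMPLES 10000

	int main(void)
	{
		/* One bucket per 0x7fX000000000 - 0x7fXffffff000 range. */
		unsigned long hits[16] = { 0 };
		unsigned long addr;
		int i;

		for (i = 0; i < SAMPLES; i++) {
			/* popen() forks and execs a shell running the test,
			 * so each sample gets a freshly randomized mmap_base. */
			FILE *fp = popen("./mmap_test", "r");

			if (!fp)
				continue;
			/* Only count addresses in the 0x7fX... ranges above. */
			if (fscanf(fp, "%lx", &addr) == 1 && (addr >> 44) == 0x7f)
				hits[(addr >> 40) & 0xf]++;
			pclose(fp);
		}

		for (i = 0; i < 16; i++)
			printf("0x7f%x000000000 - 0x7f%xffffff000 | %lu\n",
			       i, i, hits[i]);

		return 0;
	}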

Furthermore, define is_compat_task() for x86 to fix an
undefined-function error, since the generic code calls it.
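
For example, the generic arch_mmap_rnd() in mm/util.c, which x86 now
uses, is roughly (sketch for context, not part of this diff):

	unsigned long arch_mmap_rnd(void)
	{
		unsigned long rnd;

	#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
		/* Compat (32-bit) tasks get fewer randomization bits. */
		if (is_compat_task())
			rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
		else
	#endif
			rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

		return rnd << PAGE_SHIFT;
	}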

Add the __weak attribute to the generic arch_randomize_brk() to ensure
that the kernel always uses the x86 arch_randomize_brk() on x86.
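
For context, the x86 implementation that should keep being used lives in
arch/x86/kernel/process.c and is roughly (sketch, not part of this diff):

	unsigned long arch_randomize_brk(struct mm_struct *mm)
	{
		/* Randomize the brk start within a 32 MB window. */
		return randomize_page(mm->brk, 0x02000000);
	}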

Signed-off-by: Xiongwei Song <sxwjean@...il.com>
---
 arch/x86/Kconfig              |   1 +
 arch/x86/include/asm/compat.h |   5 ++
 arch/x86/mm/mmap.c            | 112 ----------------------------------
 mm/util.c                     |   2 +-
 4 files changed, 7 insertions(+), 113 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a52e81cb256e..01a40b710103 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -114,6 +114,7 @@ config X86
 	select ARCH_USE_SYM_ANNOTATIONS
 	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	select ARCH_WANT_DEFAULT_BPF_JIT	if X86_64
+	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
 	select ARCH_WANTS_NO_INSTR
 	select ARCH_WANT_HUGE_PMD_SHARE
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 7516e4199b3c..22714a202794 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -156,6 +156,11 @@ struct compat_shmid64_ds {
 	(!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
 #endif
 
+static inline int is_compat_task(void)
+{
+	return IS_ENABLED(CONFIG_COMPAT) && test_thread_flag(TIF_ADDR32);
+}
+
 static inline bool in_x32_syscall(void)
 {
 #ifdef CONFIG_X86_X32_ABI
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index c90c20904a60..daf65cc5e5b1 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -38,118 +38,6 @@ unsigned long task_size_64bit(int full_addr_space)
 	return full_addr_space ? TASK_SIZE_MAX : DEFAULT_MAP_WINDOW;
 }
 
-static unsigned long stack_maxrandom_size(unsigned long task_size)
-{
-	unsigned long max = 0;
-	if (current->flags & PF_RANDOMIZE) {
-		max = (-1UL) & __STACK_RND_MASK(task_size == task_size_32bit());
-		max <<= PAGE_SHIFT;
-	}
-
-	return max;
-}
-
-#ifdef CONFIG_COMPAT
-# define mmap32_rnd_bits  mmap_rnd_compat_bits
-# define mmap64_rnd_bits  mmap_rnd_bits
-#else
-# define mmap32_rnd_bits  mmap_rnd_bits
-# define mmap64_rnd_bits  mmap_rnd_bits
-#endif
-
-#define SIZE_128M    (128 * 1024 * 1024UL)
-
-static int mmap_is_legacy(void)
-{
-	if (current->personality & ADDR_COMPAT_LAYOUT)
-		return 1;
-
-	return sysctl_legacy_va_layout;
-}
-
-static unsigned long arch_rnd(unsigned int rndbits)
-{
-	if (!(current->flags & PF_RANDOMIZE))
-		return 0;
-	return (get_random_long() & ((1UL << rndbits) - 1)) << PAGE_SHIFT;
-}
-
-unsigned long arch_mmap_rnd(void)
-{
-	return arch_rnd(mmap_is_ia32() ? mmap32_rnd_bits : mmap64_rnd_bits);
-}
-
-static unsigned long mmap_base(unsigned long rnd, unsigned long task_size,
-			       struct rlimit *rlim_stack)
-{
-	unsigned long gap = rlim_stack->rlim_cur;
-	unsigned long pad = stack_maxrandom_size(task_size) + stack_guard_gap;
-	unsigned long gap_min, gap_max;
-
-	/* Values close to RLIM_INFINITY can overflow. */
-	if (gap + pad > gap)
-		gap += pad;
-
-	/*
-	 * Top of mmap area (just below the process stack).
-	 * Leave an at least ~128 MB hole with possible stack randomization.
-	 */
-	gap_min = SIZE_128M;
-	gap_max = (task_size / 6) * 5;
-
-	if (gap < gap_min)
-		gap = gap_min;
-	else if (gap > gap_max)
-		gap = gap_max;
-
-	return PAGE_ALIGN(task_size - gap - rnd);
-}
-
-static unsigned long mmap_legacy_base(unsigned long rnd,
-				      unsigned long task_size)
-{
-	return __TASK_UNMAPPED_BASE(task_size) + rnd;
-}
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-static void arch_pick_mmap_base(unsigned long *base, unsigned long *legacy_base,
-		unsigned long random_factor, unsigned long task_size,
-		struct rlimit *rlim_stack)
-{
-	*legacy_base = mmap_legacy_base(random_factor, task_size);
-	if (mmap_is_legacy())
-		*base = *legacy_base;
-	else
-		*base = mmap_base(random_factor, task_size, rlim_stack);
-}
-
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
-	if (mmap_is_legacy())
-		mm->get_unmapped_area = arch_get_unmapped_area;
-	else
-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-
-	arch_pick_mmap_base(&mm->mmap_base, &mm->mmap_legacy_base,
-			arch_rnd(mmap64_rnd_bits), task_size_64bit(0),
-			rlim_stack);
-
-#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
-	/*
-	 * The mmap syscall mapping base decision depends solely on the
-	 * syscall type (64-bit or compat). This applies for 64bit
-	 * applications and 32bit applications. The 64bit syscall uses
-	 * mmap_base, the compat syscall uses mmap_compat_base.
-	 */
-	arch_pick_mmap_base(&mm->mmap_compat_base, &mm->mmap_compat_legacy_base,
-			arch_rnd(mmap32_rnd_bits), task_size_32bit(),
-			rlim_stack);
-#endif
-}
-
 unsigned long get_mmap_base(int is_legacy)
 {
 	struct mm_struct *mm = current->mm;
diff --git a/mm/util.c b/mm/util.c
index ab3711c445e6..91a26da501d3 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -344,7 +344,7 @@ unsigned long randomize_stack_top(unsigned long stack_top)
 }
 
 #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
-unsigned long arch_randomize_brk(struct mm_struct *mm)
+unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
 {
 	/* Is the current task 32bit ? */
 	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
-- 
2.30.2
