Message-ID: <201905291312.A4D2DEE4@keescook>
Date: Wed, 29 May 2019 13:12:42 -0700
From: Kees Cook <keescook@...omium.org>
To: Alexandre Ghiti <alex@...ti.fr>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Christoph Hellwig <hch@....de>,
Russell King <linux@...linux.org.uk>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will.deacon@....com>,
Ralf Baechle <ralf@...ux-mips.org>,
Paul Burton <paul.burton@...s.com>,
James Hogan <jhogan@...nel.org>,
Palmer Dabbelt <palmer@...ive.com>,
Albert Ou <aou@...s.berkeley.edu>,
Alexander Viro <viro@...iv.linux.org.uk>,
Luis Chamberlain <mcgrof@...nel.org>,
linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
linux-mips@...r.kernel.org, linux-riscv@...ts.infradead.org,
linux-fsdevel@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [PATCH v4 13/14] mips: Use generic mmap top-down layout and brk
randomization
On Sun, May 26, 2019 at 09:47:45AM -0400, Alexandre Ghiti wrote:
> mips uses a top-down layout by default that exactly fits the generic
> functions, so get rid of the arch-specific code and use the generic
> version by selecting ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT.
> As ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT selects ARCH_HAS_ELF_RANDOMIZE,
> use the generic version of arch_randomize_brk since it also fits.
> Note that this commit also removes the possibility for mips to have ELF
> randomization without an MMU: without an MMU, the security added by
> randomization is worthless.
>
> Signed-off-by: Alexandre Ghiti <alex@...ti.fr>
Reviewed-by: Kees Cook <keescook@...omium.org>
-Kees
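
(For reference: the generic helpers that replace the code removed below
live in mm/util.c under CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT.
The following is a paraphrased sketch of the layout-selection path,
written from memory rather than quoted verbatim, so check mm/util.c for
the authoritative version:

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary. */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/* Clamp the mmap/stack gap between MIN_GAP and MAX_GAP. */
	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		/* Legacy layout: bottom-up from TASK_UNMAPPED_BASE. */
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		/* Default layout: top-down from just below the stack gap. */
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

That is essentially the same logic as the MIPS copy being deleted, which
is why dropping HAVE_ARCH_PICK_MMAP_LAYOUT and the local implementation
is safe.)
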
> ---
> arch/mips/Kconfig | 2 +-
> arch/mips/include/asm/processor.h | 5 --
> arch/mips/mm/mmap.c | 96 -------------------------------
> 3 files changed, 1 insertion(+), 102 deletions(-)
>
> diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
> index 70d3200476bf..da15b02bbe23 100644
> --- a/arch/mips/Kconfig
> +++ b/arch/mips/Kconfig
> @@ -5,7 +5,6 @@ config MIPS
> select ARCH_32BIT_OFF_T if !64BIT
> select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
> select ARCH_CLOCKSOURCE_DATA
> - select ARCH_HAS_ELF_RANDOMIZE
> select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
> select ARCH_HAS_UBSAN_SANITIZE_ALL
> select ARCH_SUPPORTS_UPROBES
> @@ -13,6 +12,7 @@ config MIPS
> select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
> select ARCH_USE_QUEUED_RWLOCKS
> select ARCH_USE_QUEUED_SPINLOCKS
> + select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
> select ARCH_WANT_IPC_PARSE_VERSION
> select BUILDTIME_EXTABLE_SORT
> select CLONE_BACKWARDS
> diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
> index aca909bd7841..fba18d4a9190 100644
> --- a/arch/mips/include/asm/processor.h
> +++ b/arch/mips/include/asm/processor.h
> @@ -29,11 +29,6 @@
>
> extern unsigned int vced_count, vcei_count;
>
> -/*
> - * MIPS does have an arch_pick_mmap_layout()
> - */
> -#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
> -
> #ifdef CONFIG_32BIT
> #ifdef CONFIG_KVM_GUEST
> /* User space process size is limited to 1GB in KVM Guest Mode */
> diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
> index 900670ea8531..c2effe535484 100644
> --- a/arch/mips/mm/mmap.c
> +++ b/arch/mips/mm/mmap.c
> @@ -16,49 +16,10 @@
> #include <linux/random.h>
> #include <linux/sched/signal.h>
> #include <linux/sched/mm.h>
> -#include <linux/sizes.h>
> -#include <linux/compat.h>
>
> unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
> EXPORT_SYMBOL(shm_align_mask);
>
> -/* gap between mmap and stack */
> -#define MIN_GAP (128*1024*1024UL)
> -#define MAX_GAP ((STACK_TOP)/6*5)
> -#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
> -
> -static int mmap_is_legacy(struct rlimit *rlim_stack)
> -{
> - if (current->personality & ADDR_COMPAT_LAYOUT)
> - return 1;
> -
> - if (rlim_stack->rlim_cur == RLIM_INFINITY)
> - return 1;
> -
> - return sysctl_legacy_va_layout;
> -}
> -
> -static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
> -{
> - unsigned long gap = rlim_stack->rlim_cur;
> - unsigned long pad = stack_guard_gap;
> -
> - /* Account for stack randomization if necessary */
> - if (current->flags & PF_RANDOMIZE)
> - pad += (STACK_RND_MASK << PAGE_SHIFT);
> -
> - /* Values close to RLIM_INFINITY can overflow. */
> - if (gap + pad > gap)
> - gap += pad;
> -
> - if (gap < MIN_GAP)
> - gap = MIN_GAP;
> - else if (gap > MAX_GAP)
> - gap = MAX_GAP;
> -
> - return PAGE_ALIGN(STACK_TOP - gap - rnd);
> -}
> -
> #define COLOUR_ALIGN(addr, pgoff) \
> ((((addr) + shm_align_mask) & ~shm_align_mask) + \
> (((pgoff) << PAGE_SHIFT) & shm_align_mask))
> @@ -156,63 +117,6 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
> addr0, len, pgoff, flags, DOWN);
> }
>
> -unsigned long arch_mmap_rnd(void)
> -{
> - unsigned long rnd;
> -
> -#ifdef CONFIG_COMPAT
> - if (TASK_IS_32BIT_ADDR)
> - rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
> - else
> -#endif /* CONFIG_COMPAT */
> - rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
> -
> - return rnd << PAGE_SHIFT;
> -}
> -
> -void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
> -{
> - unsigned long random_factor = 0UL;
> -
> - if (current->flags & PF_RANDOMIZE)
> - random_factor = arch_mmap_rnd();
> -
> - if (mmap_is_legacy(rlim_stack)) {
> - mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
> - mm->get_unmapped_area = arch_get_unmapped_area;
> - } else {
> - mm->mmap_base = mmap_base(random_factor, rlim_stack);
> - mm->get_unmapped_area = arch_get_unmapped_area_topdown;
> - }
> -}
> -
> -static inline unsigned long brk_rnd(void)
> -{
> - unsigned long rnd = get_random_long();
> -
> - rnd = rnd << PAGE_SHIFT;
> - /* 32MB for 32bit, 1GB for 64bit */
> - if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
> - rnd = rnd & SZ_32M;
> - else
> - rnd = rnd & SZ_1G;
> -
> - return rnd;
> -}
> -
> -unsigned long arch_randomize_brk(struct mm_struct *mm)
> -{
> - unsigned long base = mm->brk;
> - unsigned long ret;
> -
> - ret = PAGE_ALIGN(base + brk_rnd());
> -
> - if (ret < mm->brk)
> - return mm->brk;
> -
> - return ret;
> -}
> -
> int __virt_addr_valid(const volatile void *kaddr)
> {
> return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
> --
> 2.20.1
>
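
(Similarly, the generic arch_randomize_brk() that takes over from the
brk_rnd()/arch_randomize_brk() pair removed above should look roughly
like the sketch below -- again paraphrased rather than quoted verbatim
from mm/util.c:

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* 32MB of brk randomization for 32-bit, 1GB for 64-bit. */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

randomize_page() handles page alignment and overflow internally, which
covers the explicit PAGE_ALIGN() and "ret < mm->brk" checks done by the
removed MIPS version.)
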
--
Kees Cook