Message-ID: <64841617-9edd-c9ac-4742-057a7c1d81fe@csgroup.eu>
Date: Thu, 16 Dec 2021 19:21:44 +0000
From: Christophe Leroy <christophe.leroy@...roup.eu>
To: Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Michael Ellerman <mpe@...erman.id.au>
CC: "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linuxppc-dev@...ts.ozlabs.org" <linuxppc-dev@...ts.ozlabs.org>
Subject: Re: [PATCH 2/2] powerpc: Simplify and move arch_randomize_brk()
On 08/12/2021 at 15:32, Christophe Leroy wrote:
> arch_randomize_brk() is only needed for hash on book3s/64; for other
> platforms the one provided by the default mmap layout is good enough.
>
> Move it to hash_utils.c and use randomize_page() like the generic one.
>
> And properly opt out the radix case instead of making an assumption
> on mmu_highuser_ssize.
>
> Also change to a 32M range like most other architectures instead of 8M.
>
> Signed-off-by: Christophe Leroy <christophe.leroy@...roup.eu>
> ---
> Applies on top of series "powerpc: Make hash MMU code build configurable"
I was obviously dreaming when I sent this patch.
It definitely requires CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT, so it
should come at the end of the other series.
>
> arch/powerpc/kernel/process.c | 41 ---------------------------
> arch/powerpc/mm/book3s64/hash_utils.c | 19 +++++++++++++
> 2 files changed, 19 insertions(+), 41 deletions(-)
>
> diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
> index 984813a4d5dc..e7f809bdd433 100644
> --- a/arch/powerpc/kernel/process.c
> +++ b/arch/powerpc/kernel/process.c
> @@ -34,10 +34,8 @@
> #include <linux/ftrace.h>
> #include <linux/kernel_stat.h>
> #include <linux/personality.h>
> -#include <linux/random.h>
> #include <linux/hw_breakpoint.h>
> #include <linux/uaccess.h>
> -#include <linux/elf-randomize.h>
> #include <linux/pkeys.h>
> #include <linux/seq_buf.h>
>
> @@ -2313,42 +2311,3 @@ unsigned long arch_align_stack(unsigned long sp)
> sp -= get_random_int() & ~PAGE_MASK;
> return sp & ~0xf;
> }
> -
> -static inline unsigned long brk_rnd(void)
> -{
> - unsigned long rnd = 0;
> -
> - /* 8MB for 32bit, 1GB for 64bit */
> - if (is_32bit_task())
> - rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
> - else
> - rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
> -
> - return rnd << PAGE_SHIFT;
> -}
> -
> -unsigned long arch_randomize_brk(struct mm_struct *mm)
> -{
> - unsigned long base = mm->brk;
> - unsigned long ret;
> -
> -#ifdef CONFIG_PPC_BOOK3S_64
> - /*
> - * If we are using 1TB segments and we are allowed to randomise
> - * the heap, we can put it above 1TB so it is backed by a 1TB
> - * segment. Otherwise the heap will be in the bottom 1TB
> - * which always uses 256MB segments and this may result in a
> - * performance penalty.
> - */
> - if (!radix_enabled() && !is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
> - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
> -#endif
> -
> - ret = PAGE_ALIGN(base + brk_rnd());
> -
> - if (ret < mm->brk)
> - return mm->brk;
> -
> - return ret;
> -}
> -
> diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
> index eced266dc5e9..b179a001bfa4 100644
> --- a/arch/powerpc/mm/book3s64/hash_utils.c
> +++ b/arch/powerpc/mm/book3s64/hash_utils.c
> @@ -37,6 +37,8 @@
> #include <linux/cpu.h>
> #include <linux/pgtable.h>
> #include <linux/debugfs.h>
> +#include <linux/random.h>
> +#include <linux/elf-randomize.h>
>
> #include <asm/interrupt.h>
> #include <asm/processor.h>
> @@ -2185,3 +2187,20 @@ void __init print_system_hash_info(void)
> if (htab_hash_mask)
> pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
> }
> +
> +unsigned long arch_randomize_brk(struct mm_struct *mm)
> +{
> + /*
> + * If we are using 1TB segments and we are allowed to randomise
> + * the heap, we can put it above 1TB so it is backed by a 1TB
> + * segment. Otherwise the heap will be in the bottom 1TB
> + * which always uses 256MB segments and this may result in a
> + * performance penalty.
> + */
> + if (is_32bit_task())
> + return randomize_page(mm->brk, SZ_32M);
> + else if (!radix_enabled() && mmu_highuser_ssize == MMU_SEGSIZE_1T)
> + return randomize_page(max_t(unsigned long, mm->brk, SZ_1T), SZ_1G);
> + else
> + return randomize_page(mm->brk, SZ_1G);
> +}
>
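
For readers unfamiliar with the helper the patch switches to: the commit
message above relies on randomize_page() picking a page-aligned address
within a fixed window above the brk base, with the 32-bit window growing
from 8M to 32M (matching most other architectures) and 64-bit tasks keeping
a 1G window. The following is only a minimal userspace model of that
behaviour under stated assumptions, not the kernel code: it hardcodes
PAGE_SHIFT = 12, uses rand() as a stand-in for get_random_long(), and the
name model_randomize_page() is made up for illustration.

/*
 * Userspace model of page-granular brk randomization, illustration only.
 * Assumes PAGE_SHIFT = 12; rand() has a limited range compared to the
 * kernel's get_random_long().
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

#define SZ_8M	0x00800000UL
#define SZ_32M	0x02000000UL
#define SZ_1G	0x40000000UL

/* Pick a page-aligned address in [start, start + range). */
static unsigned long model_randomize_page(unsigned long start,
					  unsigned long range)
{
	/* If start is not page aligned, shrink the window so its end stays put. */
	range -= PAGE_ALIGN(start) - start;
	start = PAGE_ALIGN(start);

	if (range >> PAGE_SHIFT == 0)
		return start;

	return start + (((unsigned long)rand() % (range >> PAGE_SHIFT))
			<< PAGE_SHIFT);
}

int main(void)
{
	unsigned long brk = 0x10022000UL;	/* arbitrary example base */

	srand((unsigned)time(NULL));

	/* Old powerpc behaviour: 8M window for 32-bit tasks. */
	printf("old 32-bit brk: 0x%lx\n", model_randomize_page(brk, SZ_8M));
	/* New behaviour: 32M window, like most other architectures. */
	printf("new 32-bit brk: 0x%lx\n", model_randomize_page(brk, SZ_32M));
	/* 64-bit tasks keep a 1G window in both the old and new code. */
	printf("64-bit brk:     0x%lx\n", model_randomize_page(brk, SZ_1G));

	return 0;
}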