Message-ID: <87plpk5a4k.fsf@mail.lhotse>
Date: Wed, 04 Sep 2024 14:13:47 +1000
From: Michael Ellerman <mpe@...erman.id.au>
To: Mark Brown <broonie@...nel.org>, Richard Henderson
<richard.henderson@...aro.org>, Ivan Kokshaysky
<ink@...assic.park.msu.ru>, Matt Turner <mattst88@...il.com>, Vineet Gupta
<vgupta@...nel.org>, Russell King <linux@...linux.org.uk>, Guo Ren
<guoren@...nel.org>, Huacai Chen <chenhuacai@...nel.org>, WANG Xuerui
<kernel@...0n.name>, "James E.J. Bottomley"
<James.Bottomley@...senPartnership.com>, Helge Deller <deller@....de>,
Nicholas Piggin <npiggin@...il.com>, Christophe Leroy
<christophe.leroy@...roup.eu>, Naveen N Rao <naveen@...nel.org>, Alexander
Gordeev <agordeev@...ux.ibm.com>, Gerald Schaefer
<gerald.schaefer@...ux.ibm.com>, Heiko Carstens <hca@...ux.ibm.com>,
Vasily Gorbik <gor@...ux.ibm.com>, Christian Borntraeger
<borntraeger@...ux.ibm.com>, Sven Schnelle <svens@...ux.ibm.com>,
Yoshinori Sato <ysato@...rs.sourceforge.jp>, Rich Felker
<dalias@...c.org>, John Paul Adrian Glaubitz
<glaubitz@...sik.fu-berlin.de>, "David S. Miller" <davem@...emloft.net>,
Andreas Larsson <andreas@...sler.com>, Thomas Gleixner
<tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>, Borislav Petkov
<bp@...en8.de>, Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>, Chris Zankel <chris@...kel.net>, Max
Filippov <jcmvbkbc@...il.com>, Andrew Morton <akpm@...ux-foundation.org>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>, Vlastimil Babka
<vbabka@...e.cz>, Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
Cc: Catalin Marinas <catalin.marinas@....com>, Will Deacon
<will@...nel.org>, Deepak Gupta <debug@...osinc.com>,
linux-arm-kernel@...ts.infradead.org, linux-alpha@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-snps-arc@...ts.infradead.org,
linux-arm-kernel@...ts.infradead.org, linux-csky@...r.kernel.org,
loongarch@...ts.linux.dev, linux-parisc@...r.kernel.org,
linuxppc-dev@...ts.ozlabs.org, linux-s390@...r.kernel.org,
linux-sh@...r.kernel.org, sparclinux@...r.kernel.org, linux-mm@...ck.org,
Mark Brown <broonie@...nel.org>
Subject: Re: [PATCH 2/3] mm: Pass vm_flags to generic_get_unmapped_area()

Mark Brown <broonie@...nel.org> writes:
> In preparation for using vm_flags to ensure guard pages for shadow stacks,
> supply them as an argument to generic_get_unmapped_area(). The only user
> outside of the core code is the PowerPC book3s64 implementation, which
> trivially wraps the generic implementation in the radix_enabled() case.
>
> Signed-off-by: Mark Brown <broonie@...nel.org>
> ---
>  arch/powerpc/mm/book3s64/slice.c |  4 ++--
>  include/linux/sched/mm.h         |  4 ++--
>  mm/mmap.c                        | 10 ++++++----
>  3 files changed, 10 insertions(+), 8 deletions(-)
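
For anyone skimming the series, a rough sketch of how the generic allocator
might consume the newly plumbed vm_flags to reserve a guard gap for
shadow-stack mappings. The helper name, the VM_SHADOW_STACK check, the
one-page gap and the use of info.start_gap are illustrative assumptions
about the follow-up patch, not code taken from this one:

	/*
	 * Illustrative sketch only: the flag, helper name and gap size
	 * are assumptions, not taken from this series.
	 */
	static unsigned long shadow_stack_guard_gap(vm_flags_t vm_flags)
	{
		/* Reserve an extra page of gap below shadow-stack mappings. */
		if (vm_flags & VM_SHADOW_STACK)
			return PAGE_SIZE;

		return 0;
	}

	/* ...which generic_get_unmapped_area() could then apply as: */
	info.start_gap = shadow_stack_guard_gap(vm_flags);

Plumbing vm_flags through now keeps that sort of check in the generic
helpers rather than duplicating it in every arch wrapper.
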
Acked-by: Michael Ellerman <mpe@...erman.id.au> (powerpc)

cheers

> diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
> index ada6bf896ef8..87307d0fc3b8 100644
> --- a/arch/powerpc/mm/book3s64/slice.c
> +++ b/arch/powerpc/mm/book3s64/slice.c
> @@ -641,7 +641,7 @@ unsigned long arch_get_unmapped_area(struct file *filp,
>  				     vm_flags_t vm_flags)
>  {
>  	if (radix_enabled())
> -		return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
> +		return generic_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
> 
>  	return slice_get_unmapped_area(addr, len, flags,
>  				       mm_ctx_user_psize(&current->mm->context), 0);
> @@ -655,7 +655,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
>  					     vm_flags_t vm_flags)
>  {
>  	if (radix_enabled())
> -		return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);
> +		return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags, vm_flags);
> 
>  	return slice_get_unmapped_area(addr0, len, flags,
>  				       mm_ctx_user_psize(&current->mm->context), 1);
> diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
> index c4d34abc45d4..07bb8d4181d7 100644
> --- a/include/linux/sched/mm.h
> +++ b/include/linux/sched/mm.h
> @@ -204,11 +204,11 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
>  unsigned long
>  generic_get_unmapped_area(struct file *filp, unsigned long addr,
>  			  unsigned long len, unsigned long pgoff,
> -			  unsigned long flags);
> +			  unsigned long flags, vm_flags_t vm_flags);
>  unsigned long
>  generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
>  				  unsigned long len, unsigned long pgoff,
> -				  unsigned long flags);
> +				  unsigned long flags, vm_flags_t vm_flags);
>  #else
>  static inline void arch_pick_mmap_layout(struct mm_struct *mm,
>  					 struct rlimit *rlim_stack) {}
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 7528146f886f..b06ba847c96e 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -1789,7 +1789,7 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
>  unsigned long
>  generic_get_unmapped_area(struct file *filp, unsigned long addr,
>  			  unsigned long len, unsigned long pgoff,
> -			  unsigned long flags)
> +			  unsigned long flags, vm_flags_t vm_flags)
>  {
>  	struct mm_struct *mm = current->mm;
>  	struct vm_area_struct *vma, *prev;
> @@ -1823,7 +1823,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
>  		       unsigned long len, unsigned long pgoff,
>  		       unsigned long flags, vm_flags_t vm_flags)
>  {
> -	return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
> +	return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
> +					 vm_flags);
>  }
>  #endif
>
> @@ -1834,7 +1835,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
>  unsigned long
>  generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
>  				  unsigned long len, unsigned long pgoff,
> -				  unsigned long flags)
> +				  unsigned long flags, vm_flags_t vm_flags)
>  {
>  	struct vm_area_struct *vma, *prev;
>  	struct mm_struct *mm = current->mm;
> @@ -1887,7 +1888,8 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
>  			       unsigned long len, unsigned long pgoff,
>  			       unsigned long flags, vm_flags_t vm_flags)
>  {
> -	return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
> +	return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
> +						 vm_flags);
>  }
>  #endif
>
>
> --
> 2.39.2