Message-ID: <0de0e896-2c4b-55e5-9860-07227a5d3ef6@arm.com>
Date: Sun, 5 Jun 2022 15:49:32 +0530
From: Anshuman Khandual <anshuman.khandual@....com>
To: Christophe Leroy <christophe.leroy@...roup.eu>,
"linux-mm@...ck.org" <linux-mm@...ck.org>
Cc: Catalin Marinas <catalin.marinas@....com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Paul Mackerras <paulus@...ba.org>,
"sparclinux@...r.kernel.org" <sparclinux@...r.kernel.org>,
Will Deacon <will@...nel.org>, Jonas Bonn <jonas@...thpole.se>,
"linux-s390@...r.kernel.org" <linux-s390@...r.kernel.org>,
"x86@...nel.org" <x86@...nel.org>,
"linux-csky@...r.kernel.org" <linux-csky@...r.kernel.org>,
Ingo Molnar <mingo@...hat.com>,
Geert Uytterhoeven <geert@...ux-m68k.org>,
Vasily Gorbik <gor@...ux.ibm.com>,
Heiko Carstens <hca@...ux.ibm.com>,
"openrisc@...ts.librecores.org" <openrisc@...ts.librecores.org>,
Thomas Gleixner <tglx@...utronix.de>,
"linux-arm-kernel@...ts.infradead.org"
<linux-arm-kernel@...ts.infradead.org>,
Thomas Bogendoerfer <tsbogend@...ha.franken.de>,
"linux-mips@...r.kernel.org" <linux-mips@...r.kernel.org>,
Dinh Nguyen <dinguyen@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
"linuxppc-dev@...ts.ozlabs.org" <linuxppc-dev@...ts.ozlabs.org>,
"David S. Miller" <davem@...emloft.net>
Subject: Re: [PATCH 1/6] mm/mmap: Restrict generic protection_map[] array
visibility
On 6/3/22 17:48, Christophe Leroy wrote:
>
>
> On 03/06/2022 at 12:14, Anshuman Khandual wrote:
>> Restrict the generic protection_map[] array's visibility to platforms
>> which do not enable ARCH_HAS_VM_GET_PAGE_PROT. Platforms that define
>> their own vm_get_page_prot() via ARCH_HAS_VM_GET_PAGE_PROT can instead
>> carry a private static protection_map[] and still implement an array
>> lookup. Such private protection_map[] arrays can do without the
>> __PXXX/__SXXX macros, which become redundant there and get dropped.
>>
>> Platforms which do not define a custom vm_get_page_prot() via
>> ARCH_HAS_VM_GET_PAGE_PROT still have to provide the __PXXX/__SXXX
>> macros, but this now gives other willing platforms a way to drop the
>> __PXXX/__SXXX macros completely.
>>
>> Cc: Catalin Marinas <catalin.marinas@....com>
>> Cc: Will Deacon <will@...nel.org>
>> Cc: Michael Ellerman <mpe@...erman.id.au>
>> Cc: Paul Mackerras <paulus@...ba.org>
>> Cc: "David S. Miller" <davem@...emloft.net>
>> Cc: Thomas Gleixner <tglx@...utronix.de>
>> Cc: Ingo Molnar <mingo@...hat.com>
>> Cc: Andrew Morton <akpm@...ux-foundation.org>
>> Cc: x86@...nel.org
>> Cc: linux-arm-kernel@...ts.infradead.org
>> Cc: linuxppc-dev@...ts.ozlabs.org
>> Cc: sparclinux@...r.kernel.org
>> Cc: linux-mm@...ck.org
>> Cc: linux-kernel@...r.kernel.org
>> Signed-off-by: Anshuman Khandual <anshuman.khandual@....com>
>> ---
>> arch/arm64/include/asm/pgtable-prot.h | 18 ------------------
>> arch/arm64/mm/mmap.c | 21 +++++++++++++++++++++
>> arch/powerpc/include/asm/pgtable.h | 2 ++
>> arch/powerpc/mm/book3s64/pgtable.c | 20 ++++++++++++++++++++
>> arch/sparc/include/asm/pgtable_32.h | 2 ++
>> arch/sparc/include/asm/pgtable_64.h | 19 -------------------
>> arch/sparc/mm/init_64.c | 20 ++++++++++++++++++++
>> arch/x86/include/asm/pgtable_types.h | 19 -------------------
>> arch/x86/mm/pgprot.c | 19 +++++++++++++++++++
>> include/linux/mm.h | 2 ++
>> mm/mmap.c | 2 +-
>> 11 files changed, 87 insertions(+), 57 deletions(-)
>>
>
>> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
>> index d564d0ecd4cd..8ed2a80c896e 100644
>> --- a/arch/powerpc/include/asm/pgtable.h
>> +++ b/arch/powerpc/include/asm/pgtable.h
>> @@ -21,6 +21,7 @@ struct mm_struct;
>> #endif /* !CONFIG_PPC_BOOK3S */
>>
>> /* Note due to the way vm flags are laid out, the bits are XWR */
>> +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
>
> Ok, so until now it was common to all powerpc platforms. Now you define
> it differently depending on whether it is PPC_BOOK3S_64 or another
> platform? What's the point?
On powerpc, the Kconfig select is conditional:

	select ARCH_HAS_VM_GET_PAGE_PROT if PPC_BOOK3S_64

Currently protection_map[], which requires the __PXXX/__SXXX macros,
is applicable on all platforms, irrespective of whether they enable
ARCH_HAS_VM_GET_PAGE_PROT or not. But because protection_map[] is
being made private to the platforms enabling ARCH_HAS_VM_GET_PAGE_PROT,
those platforms will not require the __PXXX/__SXXX macros anymore.
Hence PPC_BOOK3S_64 no longer requires the macros, whereas the other
powerpc platforms still do, as they depend on the generic
protection_map[].
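To make that concrete, the end result on powerpc looks roughly like
this (just a sketch; the actual hunks below are authoritative):

	/* arch/powerpc/include/asm/pgtable.h */
	#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
	/* only non-book3s64 powerpc still needs __PXXX/__SXXX */
	#define __P000	PAGE_NONE
	/* ... remaining __PXXX/__SXXX definitions ... */
	#endif

	/* arch/powerpc/mm/book3s64/pgtable.c */
	/* book3s64 carries its own protection_map[] and vm_get_page_prot() */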
>
>> #define __P000 PAGE_NONE
>> #define __P001 PAGE_READONLY
>> #define __P010 PAGE_COPY
>> @@ -38,6 +39,7 @@ struct mm_struct;
>> #define __S101 PAGE_READONLY_X
>> #define __S110 PAGE_SHARED_X
>> #define __S111 PAGE_SHARED_X
>> +#endif
>>
>> #ifndef __ASSEMBLY__
>>
>> diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
>> index 7b9966402b25..2cf10a17c0a9 100644
>> --- a/arch/powerpc/mm/book3s64/pgtable.c
>> +++ b/arch/powerpc/mm/book3s64/pgtable.c
>> @@ -551,6 +551,26 @@ unsigned long memremap_compat_align(void)
>> EXPORT_SYMBOL_GPL(memremap_compat_align);
>> #endif
>>
>> +/* Note due to the way vm flags are laid out, the bits are XWR */
>> +static pgprot_t protection_map[16] __ro_after_init = {
>
> I don't think powerpc modifies that at all. Could be const instead of
> __ro_after_init.
Sure, will change that.
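Something like this then (sketch of what I will fold into the next
version):

	/* powerpc never touches this table after build time, so const */
	static const pgprot_t protection_map[16] = {
		[VM_NONE]	= PAGE_NONE,
		/* ... rest of the entries as below ... */
	};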
>
>> + [VM_NONE] = PAGE_NONE,
>> + [VM_READ] = PAGE_READONLY,
>> + [VM_WRITE] = PAGE_COPY,
>> + [VM_WRITE | VM_READ] = PAGE_COPY,
>> + [VM_EXEC] = PAGE_READONLY_X,
>> + [VM_EXEC | VM_READ] = PAGE_READONLY_X,
>> + [VM_EXEC | VM_WRITE] = PAGE_COPY_X,
>> + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
>> + [VM_SHARED] = PAGE_NONE,
>> + [VM_SHARED | VM_READ] = PAGE_READONLY,
>> + [VM_SHARED | VM_WRITE] = PAGE_SHARED,
>> + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
>> + [VM_SHARED | VM_EXEC] = PAGE_READONLY_X,
>> + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
>> + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_X,
>> + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
>> +};
>
> That's nice but it could apply to all powerpc platforms. Why restrict it
> to book3s/64?
Because, as mentioned earlier, the other powerpc platforms do not
enable ARCH_HAS_VM_GET_PAGE_PROT.
>
>> +
>> pgprot_t vm_get_page_prot(unsigned long vm_flags)
>> {
>> unsigned long prot = pgprot_val(protection_map[vm_flags &
>> diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
>> index 4866625da314..bca98b280fdd 100644
>> --- a/arch/sparc/include/asm/pgtable_32.h
>> +++ b/arch/sparc/include/asm/pgtable_32.h
>> @@ -65,6 +65,7 @@ void paging_init(void);
>> extern unsigned long ptr_in_current_pgd;
>>
>> /* xwr */
>> +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
>
> CONFIG_ARCH_HAS_VM_GET_PAGE_PROT is selected by sparc64 only; is that
> ifdef needed at all?
Not really necessary, but added just to tighten things up.
>
>> #define __P000 PAGE_NONE
>> #define __P001 PAGE_READONLY
>> #define __P010 PAGE_COPY
>> @@ -82,6 +83,7 @@ extern unsigned long ptr_in_current_pgd;
>> #define __S101 PAGE_READONLY
>> #define __S110 PAGE_SHARED
>> #define __S111 PAGE_SHARED
>> +#endif
>>
>> /* First physical page can be anywhere, the following is needed so that
>> * va-->pa and vice versa conversions work properly without performance
>> diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
>> index 4679e45c8348..a779418ceba9 100644
>> --- a/arch/sparc/include/asm/pgtable_64.h
>> +++ b/arch/sparc/include/asm/pgtable_64.h
>> @@ -187,25 +187,6 @@ bool kern_addr_valid(unsigned long addr);
>> #define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
>> #define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
>>
>> -/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
>> -#define __P000 __pgprot(0)
>> -#define __P001 __pgprot(0)
>> -#define __P010 __pgprot(0)
>> -#define __P011 __pgprot(0)
>> -#define __P100 __pgprot(0)
>> -#define __P101 __pgprot(0)
>> -#define __P110 __pgprot(0)
>> -#define __P111 __pgprot(0)
>> -
>> -#define __S000 __pgprot(0)
>> -#define __S001 __pgprot(0)
>> -#define __S010 __pgprot(0)
>> -#define __S011 __pgprot(0)
>> -#define __S100 __pgprot(0)
>> -#define __S101 __pgprot(0)
>> -#define __S110 __pgprot(0)
>> -#define __S111 __pgprot(0)
>> -
>> #ifndef __ASSEMBLY__
>>
>> pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
>> diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
>> index f6174df2d5af..6edc2a68b73c 100644
>> --- a/arch/sparc/mm/init_64.c
>> +++ b/arch/sparc/mm/init_64.c
>> @@ -2634,6 +2634,26 @@ void vmemmap_free(unsigned long start, unsigned long end,
>> }
>> #endif /* CONFIG_SPARSEMEM_VMEMMAP */
>>
>> +/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
>> +static pgprot_t protection_map[16] __ro_after_init = {
>> + [VM_NONE] = __pgprot(0),
>> + [VM_READ] = __pgprot(0),
>> + [VM_WRITE] = __pgprot(0),
>> + [VM_WRITE | VM_READ] = __pgprot(0),
>> + [VM_EXEC] = __pgprot(0),
>> + [VM_EXEC | VM_READ] = __pgprot(0),
>> + [VM_EXEC | VM_WRITE] = __pgprot(0),
>> + [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(0),
>> + [VM_SHARED] = __pgprot(0),
>> + [VM_SHARED | VM_READ] = __pgprot(0),
>> + [VM_SHARED | VM_WRITE] = __pgprot(0),
>> + [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(0),
>> + [VM_SHARED | VM_EXEC] = __pgprot(0),
>> + [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(0),
>> + [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(0),
>> + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(0)
>> +};
>
> __pgprot(0) is 0, so you don't need to initialise the fields at all;
> the array is zeroed at startup as part of the BSS section.
Sure, will change.
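i.e. the initialiser goes away entirely (sketch):

	/* Filled in at boot by sun4{u,v}_pgprot_init(); BSS is pre-zeroed */
	static pgprot_t protection_map[16] __ro_after_init;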
>
>> +
>> static void prot_init_common(unsigned long page_none,
>> unsigned long page_shared,
>> unsigned long page_copy,
>
>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>> index bc8f326be0ce..2254c1980c8e 100644
>> --- a/include/linux/mm.h
>> +++ b/include/linux/mm.h
>> @@ -420,11 +420,13 @@ extern unsigned int kobjsize(const void *objp);
>> #endif
>> #define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
>>
>> +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
>> /*
>> * mapping from the currently active vm_flags protection bits (the
>> * low four bits) to a page protection mask..
>> */
>> extern pgprot_t protection_map[16];
>> +#endif
>>
>> /*
>> * The default fault flags that should be used by most of the
>> diff --git a/mm/mmap.c b/mm/mmap.c
>> index 61e6135c54ef..e66920414945 100644
>> --- a/mm/mmap.c
>> +++ b/mm/mmap.c
>> @@ -101,6 +101,7 @@ static void unmap_region(struct mm_struct *mm,
>> * w: (no) no
>> * x: (yes) yes
>> */
>> +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
>> pgprot_t protection_map[16] __ro_after_init = {
>> [VM_NONE] = __P000,
>> [VM_READ] = __P001,
>> @@ -120,7 +121,6 @@ pgprot_t protection_map[16] __ro_after_init = {
>> [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __S111
>> };
>>
>> -#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
>
> Why not let architectures provide their protection_map[] and keep that
> function?
Just to understand this correctly: all platforms would provide their
own private protection_map[] array, drop the __SXXX/__PXXX macros
which would no longer be required, and depend on the generic
vm_get_page_prot() array lookup, unless they need a custom function
via ARCH_HAS_VM_GET_PAGE_PROT?
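i.e. something along these lines, if I am reading the suggestion
right (sketch):

	/* include/linux/mm.h: declaration stays for everybody */
	extern pgprot_t protection_map[16];

	/*
	 * Each platform then defines its own protection_map[], and the
	 * generic vm_get_page_prot() below keeps working unchanged.
	 */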
>
>> pgprot_t vm_get_page_prot(unsigned long vm_flags)
>> {
>> return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];