Message-ID: <4830e415-cdbb-7050-ebd6-7480493655ef@csgroup.eu>
Date: Thu, 16 Jun 2022 05:35:36 +0000
From: Christophe Leroy <christophe.leroy@...roup.eu>
To: Anshuman Khandual <anshuman.khandual@....com>,
"linux-mm@...ck.org" <linux-mm@...ck.org>
CC: "hch@...radead.org" <hch@...radead.org>,
Andrew Morton <akpm@...ux-foundation.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Christoph Hellwig <hch@....de>
Subject: Re: [PATCH V3 1/2] mm/mmap: Restrict generic protection_map[] array
visibility
On 16/06/2022 at 06:09, Anshuman Khandual wrote:
> Restrict the generic protection_map[] array's visibility to platforms which
> do not enable ARCH_HAS_VM_GET_PAGE_PROT. Platforms that do define their own
> vm_get_page_prot() and enable ARCH_HAS_VM_GET_PAGE_PROT can instead keep a
> private static protection_map[] that still implements the array lookup.
> These private protection_map[] arrays can do without the __PXXX/__SXXX
> macros, which therefore become redundant and are dropped as well.
>
> Platforms which do not define a custom vm_get_page_prot() and do not enable
> ARCH_HAS_VM_GET_PAGE_PROT still have to provide the __PXXX/__SXXX macros.
>
> Cc: Andrew Morton <akpm@...ux-foundation.org>
> Cc: linux-mm@...ck.org
> Cc: linux-kernel@...r.kernel.org
> Acked-by: Christoph Hellwig <hch@....de>
> Signed-off-by: Anshuman Khandual <anshuman.khandual@....com>
> ---
> arch/arm64/include/asm/pgtable-prot.h | 18 ------------------
> arch/arm64/mm/mmap.c | 21 +++++++++++++++++++++
> arch/powerpc/include/asm/pgtable.h | 2 ++
> arch/powerpc/mm/book3s64/pgtable.c | 20 ++++++++++++++++++++
> arch/sparc/include/asm/pgtable_64.h | 19 -------------------
> arch/sparc/mm/init_64.c | 3 +++
> arch/x86/include/asm/pgtable_types.h | 19 -------------------
> arch/x86/mm/pgprot.c | 19 +++++++++++++++++++
> include/linux/mm.h | 2 ++
> mm/mmap.c | 2 +-
> 10 files changed, 68 insertions(+), 57 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
> index d564d0ecd4cd..8ed2a80c896e 100644
> --- a/arch/powerpc/include/asm/pgtable.h
> +++ b/arch/powerpc/include/asm/pgtable.h
> @@ -21,6 +21,7 @@ struct mm_struct;
> #endif /* !CONFIG_PPC_BOOK3S */
>
> /* Note due to the way vm flags are laid out, the bits are XWR */
> +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
This ifdef is not necessary for now; it doesn't matter if __P000 etc.
still exist even though they are unused.
> #define __P000 PAGE_NONE
> #define __P001 PAGE_READONLY
> #define __P010 PAGE_COPY
> @@ -38,6 +39,7 @@ struct mm_struct;
> #define __S101 PAGE_READONLY_X
> #define __S110 PAGE_SHARED_X
> #define __S111 PAGE_SHARED_X
> +#endif
>
> #ifndef __ASSEMBLY__
>
> diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
> index 7b9966402b25..d3b019b95c1d 100644
> --- a/arch/powerpc/mm/book3s64/pgtable.c
> +++ b/arch/powerpc/mm/book3s64/pgtable.c
> @@ -551,6 +551,26 @@ unsigned long memremap_compat_align(void)
> EXPORT_SYMBOL_GPL(memremap_compat_align);
> #endif
>
> +/* Note due to the way vm flags are laid out, the bits are XWR */
> +static const pgprot_t protection_map[16] = {
> + [VM_NONE] = PAGE_NONE,
> + [VM_READ] = PAGE_READONLY,
> + [VM_WRITE] = PAGE_COPY,
> + [VM_WRITE | VM_READ] = PAGE_COPY,
> + [VM_EXEC] = PAGE_READONLY_X,
> + [VM_EXEC | VM_READ] = PAGE_READONLY_X,
> + [VM_EXEC | VM_WRITE] = PAGE_COPY_X,
> + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
> + [VM_SHARED] = PAGE_NONE,
> + [VM_SHARED | VM_READ] = PAGE_READONLY,
> + [VM_SHARED | VM_WRITE] = PAGE_SHARED,
> + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
> + [VM_SHARED | VM_EXEC] = PAGE_READONLY_X,
> + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
> + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_X,
> + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
> +};
> +
There is not much point in first adding that here and then moving it
elsewhere in the second patch.
I think that with my suggestion to use #ifdef __P000 as a guard, the
powerpc changes could go in a single patch.
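For illustration, a rough untested sketch of what the squashed powerpc
change would amount to (assuming the #ifdef __P000 guard suggested
below lands in mm/mmap.c):

	/* arch/powerpc/mm/book3s64/pgtable.c: private table, exactly as
	 * in the hunk above. */
	static const pgprot_t protection_map[16] = {
		[VM_NONE] = PAGE_NONE,
		/* ... remaining entries as above ... */
		[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X,
	};

	/* arch/powerpc/include/asm/pgtable.h: the __P000..__S111 block
	 * is deleted outright; once __P000 is gone, the guarded generic
	 * protection_map[] in mm/mmap.c drops out for powerpc
	 * automatically, so no CONFIG-based #ifndef is needed here. */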
> pgprot_t vm_get_page_prot(unsigned long vm_flags)
> {
> unsigned long prot = pgprot_val(protection_map[vm_flags &
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 61e6135c54ef..e66920414945 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -101,6 +101,7 @@ static void unmap_region(struct mm_struct *mm,
> * w: (no) no
> * x: (yes) yes
> */
> +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
You should use #ifdef __P000 instead; that way you can migrate
architectures one by one.
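Something like this untested sketch in mm/mmap.c:

	#ifdef __P000
	/* Generic table, only built while the architecture still
	 * provides the legacy __P000..__S111 macros. */
	pgprot_t protection_map[16] __ro_after_init = {
		[VM_NONE] = __P000,
		/* ... remaining entries as in the hunk below ... */
		[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __S111
	};

	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		return protection_map[vm_flags &
				      (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
	}
	#endif /* __P000 */

Then each architecture can be migrated one by one: the patch that adds
its own vm_get_page_prot() also deletes its __P000..__S111 macros, and
the generic definitions in the sketch disappear for that architecture
automatically.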
> pgprot_t protection_map[16] __ro_after_init = {
> [VM_NONE] = __P000,
> [VM_READ] = __P001,
> @@ -120,7 +121,6 @@ pgprot_t protection_map[16] __ro_after_init = {
> [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __S111
> };
>
> -#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
> pgprot_t vm_get_page_prot(unsigned long vm_flags)
> {
> return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];