Message-ID: <874ji4af3a.fsf@linux.ibm.com>
Date: Thu, 02 Nov 2023 11:09:37 +0530
From: "Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>
To: Christophe Leroy <christophe.leroy@...roup.eu>,
Michael Ellerman <mpe@...erman.id.au>,
Nicholas Piggin <npiggin@...il.com>
Cc: Christophe Leroy <christophe.leroy@...roup.eu>,
linux-kernel@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
Russell Currey <ruscur@...sell.cc>,
Kees Cook <keescook@...omium.org>
Subject: Re: [PATCH v2 37/37] powerpc: Support execute-only on all powerpc

Christophe Leroy <christophe.leroy@...roup.eu> writes:
> Introduce PAGE_EXECONLY_X macro which provides exec-only rights.
> The _X may be seen as redundant with the EXECONLY but it helps
> keep consistency: all macros having the EXEC right have _X.
>
> And put it next to PAGE_NONE as PAGE_EXECONLY_X is
> somehow PAGE_NONE + EXEC just like all other SOMETHING_X are
> just SOMETHING + EXEC.
>
> On book3s/64 PAGE_EXECONLY becomes PAGE_READONLY_X.
>
> On book3s/64, as PAGE_EXECONLY is only valid for Radix, add the
> VM_READ flag in vm_get_page_prot() for non-Radix.
>
> And update access_error() so that a non-exec fault on a VM_EXEC-only
> mapping is always invalid, even when the underlying layer doesn't
> always generate a fault for it.
>
> For 8xx, set PAGE_EXECONLY_X to _PAGE_NA | _PAGE_EXEC.
> For the others, set it to just _PAGE_EXEC.
>
> With that change, 8xx, e500 and 44x fully honor execute-only
> protection.
>
> On 40x this is a partial implementation of execute-only. The
> implementation can't be complete because once a TLB entry has been
> loaded via the Instruction TLB miss handler, it becomes possible to
> read the page. But at least the page can't be read unless it is
> executed first.
>
> On the 603 MMU, TLB misses are handled by software and there are
> separate DTLB and ITLB. Execute-only is therefore now supported by
> not loading the DTLB when read access is not permitted.
>
> On the hash (604) MMU it is more tricky because the hash table is
> common to load/store and execute. Nevertheless it is still possible
> to check whether _PAGE_READ is set before loading the hash table for
> a load/store access. At least the page can't be read unless it is
> executed first.
>
> Signed-off-by: Christophe Leroy <christophe.leroy@...roup.eu>
> Cc: Russell Currey <ruscur@...sell.cc>
> Cc: Kees Cook <keescook@...omium.org>
> ---
> arch/powerpc/include/asm/book3s/32/pgtable.h | 2 +-
> arch/powerpc/include/asm/book3s/64/pgtable.h | 4 +---
> arch/powerpc/include/asm/nohash/32/pte-8xx.h | 1 +
> arch/powerpc/include/asm/nohash/pgtable.h | 2 +-
> arch/powerpc/include/asm/nohash/pte-e500.h | 1 +
> arch/powerpc/include/asm/pgtable-masks.h | 2 ++
> arch/powerpc/mm/book3s64/pgtable.c | 10 ++++------
> arch/powerpc/mm/fault.c | 9 +++++----
> arch/powerpc/mm/pgtable.c | 4 ++--
> 9 files changed, 18 insertions(+), 17 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
> index 244621c88510..52971ee30717 100644
> --- a/arch/powerpc/include/asm/book3s/32/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
> @@ -425,7 +425,7 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
> {
> /*
> * A read-only access is controlled by _PAGE_READ bit.
> - * We have _PAGE_READ set for WRITE and EXECUTE
> + * We have _PAGE_READ set for WRITE
> */
> if (!pte_present(pte) || !pte_read(pte))
> return false;
>
Should this now be updated to also check for the EXEC bit?
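
Roughly something like the below is what I have in mind (untested
sketch only, assuming pte_exec() is usable in this header):

	static inline bool pte_access_permitted(pte_t pte, bool write)
	{
		/*
		 * With execute-only support, EXEC no longer implies
		 * _PAGE_READ, so accept either read or exec permission
		 * for a non-write access.
		 */
		if (!pte_present(pte) || (!pte_read(pte) && !pte_exec(pte)))
			return false;

		if (write && !pte_write(pte))
			return false;

		return true;
	}
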
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index 0fd12bdc7b5e..751b01227e36 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -18,6 +18,7 @@
> #define _PAGE_WRITE 0x00002 /* write access allowed */
> #define _PAGE_READ 0x00004 /* read access allowed */
> #define _PAGE_NA _PAGE_PRIVILEGED
> +#define _PAGE_NAX _PAGE_EXEC
> #define _PAGE_RO _PAGE_READ
> #define _PAGE_ROX (_PAGE_READ | _PAGE_EXEC)
> #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
> @@ -141,9 +142,6 @@
>
> #include <asm/pgtable-masks.h>
>
> -/* Radix only, Hash uses PAGE_READONLY_X + execute-only pkey instead */
> -#define PAGE_EXECONLY __pgprot(_PAGE_BASE | _PAGE_EXEC)
> -
> /* Permission masks used for kernel mappings */
> #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
> #define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_TOLERANT)
> diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
> index 1ee38befd29a..137dc3c84e45 100644
> --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
> +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
> @@ -48,6 +48,7 @@
>
> #define _PAGE_HUGE 0x0800 /* Copied to L1 PS bit 29 */
>
> +#define _PAGE_NAX (_PAGE_NA | _PAGE_EXEC)
> #define _PAGE_ROX (_PAGE_RO | _PAGE_EXEC)
> #define _PAGE_RW 0
> #define _PAGE_RWX _PAGE_EXEC
> diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
> index f922c84b23eb..a50be1de9f83 100644
> --- a/arch/powerpc/include/asm/nohash/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/pgtable.h
> @@ -203,7 +203,7 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
> {
> /*
> * A read-only access is controlled by _PAGE_READ bit.
> - * We have _PAGE_READ set for WRITE and EXECUTE
> + * We have _PAGE_READ set for WRITE
> */
> if (!pte_present(pte) || !pte_read(pte))
> return false;
>
Same here. If so, I guess book3s/64 will also need an update?
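
i.e. the same kind of relaxation as above, something like (sketch only,
same caveat about pte_exec()):

	if (!pte_present(pte) || (!pte_read(pte) && !pte_exec(pte)))
		return false;

And if book3s/64's pte_access_permitted() also relies on _PAGE_READ
being set for an executable mapping, it would presumably need an
equivalent change.
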
> diff --git a/arch/powerpc/include/asm/nohash/pte-e500.h b/arch/powerpc/include/asm/nohash/pte-e500.h
> index 31d2c3ea7df8..f516f0b5b7a8 100644
> --- a/arch/powerpc/include/asm/nohash/pte-e500.h
> +++ b/arch/powerpc/include/asm/nohash/pte-e500.h
> @@ -57,6 +57,7 @@
> #define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX)
>
> #define _PAGE_NA 0
> +#define _PAGE_NAX _PAGE_BAP_UX
> #define _PAGE_RO _PAGE_READ
> #define _PAGE_ROX (_PAGE_READ | _PAGE_BAP_UX)
> #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
> diff --git a/arch/powerpc/include/asm/pgtable-masks.h b/arch/powerpc/include/asm/pgtable-masks.h
> index 808a3b9e8fc0..6e8e2db26a5a 100644
> --- a/arch/powerpc/include/asm/pgtable-masks.h
> +++ b/arch/powerpc/include/asm/pgtable-masks.h
> @@ -4,6 +4,7 @@
>
> #ifndef _PAGE_NA
> #define _PAGE_NA 0
> +#define _PAGE_NAX _PAGE_EXEC
> #define _PAGE_RO _PAGE_READ
> #define _PAGE_ROX (_PAGE_READ | _PAGE_EXEC)
> #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
> @@ -20,6 +21,7 @@
>
> /* Permission masks used to generate the __P and __S table */
> #define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_NA)
> +#define PAGE_EXECONLY_X __pgprot(_PAGE_BASE | _PAGE_NAX)
> #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW)
> #define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RWX)
> #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_RO)
> diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
> index 8f8a62d3ff4d..be229290a6a7 100644
> --- a/arch/powerpc/mm/book3s64/pgtable.c
> +++ b/arch/powerpc/mm/book3s64/pgtable.c
> @@ -635,12 +635,10 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
> unsigned long prot;
>
> /* Radix supports execute-only, but protection_map maps X -> RX */
> - if (radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)) {
> - prot = pgprot_val(PAGE_EXECONLY);
> - } else {
> - prot = pgprot_val(protection_map[vm_flags &
> - (VM_ACCESS_FLAGS | VM_SHARED)]);
> - }
> + if (!radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC))
> + vm_flags |= VM_READ;
> +
> + prot = pgprot_val(protection_map[vm_flags & (VM_ACCESS_FLAGS | VM_SHARED)]);
>
> if (vm_flags & VM_SAO)
> prot |= _PAGE_SAO;
> diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
> index b1723094d464..9e49ede2bc1c 100644
> --- a/arch/powerpc/mm/fault.c
> +++ b/arch/powerpc/mm/fault.c
> @@ -266,14 +266,15 @@ static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma
> }
>
> /*
> - * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as
> - * defined in protection_map[]. Read faults can only be caused by
> - * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix.
> + * VM_READ, VM_WRITE and VM_EXEC may imply read permissions, as
> + * defined in protection_map[]. In that case Read faults can only be
> + * caused by a PROT_NONE mapping. However a non exec access on a
> + * VM_EXEC only mapping is invalid anyway, so report it as such.
> */
> if (unlikely(!vma_is_accessible(vma)))
> return true;
>
> - if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)))
> + if ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)
> return true;
>
> /*
> diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
> index 781a68c69c2f..79508c1d15d7 100644
> --- a/arch/powerpc/mm/pgtable.c
> +++ b/arch/powerpc/mm/pgtable.c
> @@ -492,7 +492,7 @@ const pgprot_t protection_map[16] = {
> [VM_READ] = PAGE_READONLY,
> [VM_WRITE] = PAGE_COPY,
> [VM_WRITE | VM_READ] = PAGE_COPY,
> - [VM_EXEC] = PAGE_READONLY_X,
> + [VM_EXEC] = PAGE_EXECONLY_X,
> [VM_EXEC | VM_READ] = PAGE_READONLY_X,
> [VM_EXEC | VM_WRITE] = PAGE_COPY_X,
> [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
> @@ -500,7 +500,7 @@ const pgprot_t protection_map[16] = {
> [VM_SHARED | VM_READ] = PAGE_READONLY,
> [VM_SHARED | VM_WRITE] = PAGE_SHARED,
> [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
> - [VM_SHARED | VM_EXEC] = PAGE_READONLY_X,
> + [VM_SHARED | VM_EXEC] = PAGE_EXECONLY_X,
> [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
> [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_X,
> [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
> --
> 2.41.0