Message-ID: <5794f254-0523-7f2f-f9e7-ff64a7fe400d@csgroup.eu>
Date: Mon, 25 Oct 2021 23:53:41 +0200
From: Christophe Leroy <christophe.leroy@...roup.eu>
To: Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Michael Ellerman <mpe@...erman.id.au>
Cc: linux-kernel@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org
Subject: Re: [PATCH v2 02/10] powerpc/book3e: Fix set_memory_x() and
set_memory_nx()
On 23/10/2021 13:47, Christophe Leroy wrote:
> set_memory_x() calls pte_mkexec() which sets _PAGE_EXEC.
> set_memory_nx() calls pte_exprotect() which clears _PAGE_EXEC.
>
> Book3e has 2 bits, UX and SX, which define the exec rights
> for user (PR=1) and for kernel (PR=0) respectively.
>
> _PAGE_EXEC is defined as UX only.
>
> An executable kernel page is set with either _PAGE_KERNEL_RWX
> or _PAGE_KERNEL_ROX, which both have SX set and UX cleared.
>
> So a set_memory_nx() call on an executable kernel page does
> nothing, because UX is already cleared.
>
> And set_memory_x() on a non-executable kernel page makes it
> executable for the user but keeps it non-executable for the kernel.
>
> Also, pte_exec() always returns 'false' on kernel pages, because
> it checks _PAGE_EXEC which doesn't include SX, so for instance
> the W+X check doesn't work.
>
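To make the problem concrete, here is a stand-alone sketch (plain
user-space C with made-up bit values, not the real book3e headers) of
what the current helpers do on a kernel text page:

#include <stdbool.h>
#include <stdio.h>

/* Made-up bit values, only mirroring the UX/SX split described above */
#define _PAGE_BAP_SX    0x10    /* supervisor (kernel) exec */
#define _PAGE_BAP_UX    0x20    /* user exec */

#define _PAGE_EXEC      _PAGE_BAP_UX    /* current definition: UX only */

static bool pte_exec(unsigned long pte)
{
        return pte & _PAGE_EXEC;
}

static unsigned long pte_exprotect(unsigned long pte)
{
        return pte & ~_PAGE_EXEC;
}

int main(void)
{
        unsigned long pte = _PAGE_BAP_SX;  /* _PAGE_KERNEL_ROX style: SX set, UX clear */

        /* a W+X style check wrongly sees the page as non-executable */
        printf("pte_exec() on kernel text: %d\n", pte_exec(pte));

        /* set_memory_nx() only clears UX, the SX right survives */
        pte = pte_exprotect(pte);
        printf("kernel exec right still set: %d\n", !!(pte & _PAGE_BAP_SX));

        return 0;
}
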
> To fix this:
> - change tlb_low_64e.S to use _PAGE_BAP_UX instead of _PAGE_EXEC
> - set both UX and SX in _PAGE_EXEC so that pte_exec() returns
> true whenever one of the two bits is set, and pte_exprotect()
> clears both bits.
> - define a book3e-specific version of pte_mkexec() which sets
> either SX or UX based on UR.
>
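With both bits in _PAGE_EXEC, the sketch above behaves as expected:
pte_exec() becomes true for kernel text, pte_exprotect() strips both
SX and UX, and the new pte_mkexec() picks the right bit from UR.
Again only an illustration with made-up values, not the real code:

#include <stdio.h>

/* Made-up bit values, as in the previous sketch, with UR added */
#define _PAGE_BAP_UR    0x02    /* user read: identifies a user page */
#define _PAGE_BAP_SX    0x10    /* supervisor (kernel) exec */
#define _PAGE_BAP_UX    0x20    /* user exec */

#define _PAGE_EXEC      (_PAGE_BAP_SX | _PAGE_BAP_UX)  /* new definition */

/* now clears both exec bits, so set_memory_nx() works on kernel pages */
static unsigned long pte_exprotect(unsigned long pte)
{
        return pte & ~_PAGE_EXEC;
}

/* sets UX on user pages (UR set) and SX on kernel pages */
static unsigned long pte_mkexec(unsigned long pte)
{
        if (pte & _PAGE_BAP_UR)
                return (pte & ~_PAGE_BAP_SX) | _PAGE_BAP_UX;
        return (pte & ~_PAGE_BAP_UX) | _PAGE_BAP_SX;
}

int main(void)
{
        unsigned long kernel_pte = _PAGE_BAP_SX;  /* ROX-style kernel text */
        unsigned long user_pte = _PAGE_BAP_UR;    /* plain user page */

        printf("kernel text now seen as exec: %d\n", !!(kernel_pte & _PAGE_EXEC));
        printf("exec after pte_exprotect(): %d\n", !!(pte_exprotect(kernel_pte) & _PAGE_EXEC));
        printf("pte_mkexec(user page) sets UX: %d\n", !!(pte_mkexec(user_pte) & _PAGE_BAP_UX));

        return 0;
}
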
> Fixes: 1f9ad21c3b38 ("powerpc/mm: Implement set_memory() routines")
> Signed-off-by: Christophe Leroy <christophe.leroy@...roup.eu>
> ---
> v2: New

pte_mkexec() in nohash/64/pgtable.h conflicts with the one in
nohash/pte-book3e.h.
Should guard it with #ifndef pte_mkexec, but as pte-book3e.h is the
only user on 64 bits, just remove it from there instead.
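For reference, the guard would just mirror what this patch already
does in nohash/32/pgtable.h, i.e. roughly (sketch only, the exact
64-bit body may differ):

#ifndef pte_mkexec
static inline pte_t pte_mkexec(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_EXEC);
}
#endif
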
I'll send a v3 with only that change compared to v2.
Christophe
> ---
> arch/powerpc/include/asm/nohash/32/pgtable.h | 2 ++
> arch/powerpc/include/asm/nohash/pte-book3e.h | 18 ++++++++++++++----
> arch/powerpc/mm/nohash/tlb_low_64e.S | 8 ++++----
> 3 files changed, 20 insertions(+), 8 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
> index ac0a5ff48c3a..d6ba821a56ce 100644
> --- a/arch/powerpc/include/asm/nohash/32/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
> @@ -193,10 +193,12 @@ static inline pte_t pte_wrprotect(pte_t pte)
> }
> #endif
>
> +#ifndef pte_mkexec
> static inline pte_t pte_mkexec(pte_t pte)
> {
> return __pte(pte_val(pte) | _PAGE_EXEC);
> }
> +#endif
>
> #define pmd_none(pmd) (!pmd_val(pmd))
> #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
> diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h
> index 813918f40765..f798640422c2 100644
> --- a/arch/powerpc/include/asm/nohash/pte-book3e.h
> +++ b/arch/powerpc/include/asm/nohash/pte-book3e.h
> @@ -48,7 +48,7 @@
> #define _PAGE_WRITETHRU 0x800000 /* W: cache write-through */
>
> /* "Higher level" linux bit combinations */
> -#define _PAGE_EXEC _PAGE_BAP_UX /* .. and was cache cleaned */
> +#define _PAGE_EXEC (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. and was cache cleaned */
> #define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
> #define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
> #define _PAGE_KERNEL_RO (_PAGE_BAP_SR)
> @@ -93,11 +93,11 @@
> /* Permission masks used to generate the __P and __S table */
> #define PAGE_NONE __pgprot(_PAGE_BASE)
> #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
> -#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
> +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX)
> #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
> -#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
> +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
> #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
> -#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
> +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
>
> #ifndef __ASSEMBLY__
> static inline pte_t pte_mkprivileged(pte_t pte)
> @@ -113,6 +113,16 @@ static inline pte_t pte_mkuser(pte_t pte)
> }
>
> #define pte_mkuser pte_mkuser
> +
> +static inline pte_t pte_mkexec(pte_t pte)
> +{
> + if (pte_val(pte) & _PAGE_BAP_UR)
> + return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX);
> + else
> + return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX);
> +}
> +#define pte_mkexec pte_mkexec
> +
> #endif /* __ASSEMBLY__ */
>
> #endif /* __KERNEL__ */
> diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
> index bf24451f3e71..9235e720e357 100644
> --- a/arch/powerpc/mm/nohash/tlb_low_64e.S
> +++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
> @@ -222,7 +222,7 @@ tlb_miss_kernel_bolted:
>
> tlb_miss_fault_bolted:
> /* We need to check if it was an instruction miss */
> - andi. r10,r11,_PAGE_EXEC|_PAGE_BAP_SX
> + andi. r10,r11,_PAGE_BAP_UX|_PAGE_BAP_SX
> bne itlb_miss_fault_bolted
> dtlb_miss_fault_bolted:
> tlb_epilog_bolted
> @@ -239,7 +239,7 @@ itlb_miss_fault_bolted:
> srdi r15,r16,60 /* get region */
> bne- itlb_miss_fault_bolted
>
> - li r11,_PAGE_PRESENT|_PAGE_EXEC /* Base perm */
> + li r11,_PAGE_PRESENT|_PAGE_BAP_UX /* Base perm */
>
> /* We do the user/kernel test for the PID here along with the RW test
> */
> @@ -614,7 +614,7 @@ itlb_miss_fault_e6500:
>
> /* We do the user/kernel test for the PID here along with the RW test
> */
> - li r11,_PAGE_PRESENT|_PAGE_EXEC /* Base perm */
> + li r11,_PAGE_PRESENT|_PAGE_BAP_UX /* Base perm */
> oris r11,r11,_PAGE_ACCESSED@h
>
> cmpldi cr0,r15,0 /* Check for user region */
> @@ -734,7 +734,7 @@ normal_tlb_miss_done:
>
> normal_tlb_miss_access_fault:
> /* We need to check if it was an instruction miss */
> - andi. r10,r11,_PAGE_EXEC
> + andi. r10,r11,_PAGE_BAP_UX
> bne 1f
> ld r14,EX_TLB_DEAR(r12)
> ld r15,EX_TLB_ESR(r12)
>