Message-ID: <22972e7b-0844-4ebc-8d82-a0838b83c3a0@lucifer.local>
Date: Wed, 9 Oct 2024 14:36:12 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Deepak Gupta <debug@...osinc.com>
Cc: Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>,
Andrew Morton <akpm@...ux-foundation.org>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Vlastimil Babka <vbabka@...e.cz>,
Paul Walmsley <paul.walmsley@...ive.com>,
Palmer Dabbelt <palmer@...belt.com>, Albert Ou <aou@...s.berkeley.edu>,
Conor Dooley <conor@...nel.org>, Rob Herring <robh@...nel.org>,
Krzysztof Kozlowski <krzk+dt@...nel.org>,
Arnd Bergmann <arnd@...db.de>, Christian Brauner <brauner@...nel.org>,
Peter Zijlstra <peterz@...radead.org>, Oleg Nesterov <oleg@...hat.com>,
Eric Biederman <ebiederm@...ssion.com>, Kees Cook <kees@...nel.org>,
Jonathan Corbet <corbet@....net>, Shuah Khan <shuah@...nel.org>,
linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org, linux-riscv@...ts.infradead.org,
devicetree@...r.kernel.org, linux-arch@...r.kernel.org,
linux-doc@...r.kernel.org, linux-kselftest@...r.kernel.org,
alistair.francis@....com, richard.henderson@...aro.org,
jim.shu@...ive.com, andybnac@...il.com, kito.cheng@...ive.com,
charlie@...osinc.com, atishp@...osinc.com, evan@...osinc.com,
cleger@...osinc.com, alexghiti@...osinc.com, samitolvanen@...gle.com,
broonie@...nel.org, rick.p.edgecombe@...el.com
Subject: Re: [PATCH v6 11/33] riscv/mm : ensure PROT_WRITE leads to VM_READ |
VM_WRITE
On Tue, Oct 08, 2024 at 03:36:53PM -0700, Deepak Gupta wrote:
> `arch_calc_vm_prot_bits` is implemented on risc-v to return VM_READ |
> VM_WRITE if PROT_WRITE is specified. Similarly `riscv_sys_mmap` is
> updated to convert all incoming PROT_WRITE to (PROT_WRITE | PROT_READ).
> This is to make sure that any existing apps using PROT_WRITE still work.
>
> Earlier `protection_map[VM_WRITE]` used to pick read-write PTE encodings.
> Now `protection_map[VM_WRITE]` will always pick PAGE_SHADOWSTACK PTE
> encodings for shadow stack. The above changes ensure that existing apps
> continue to work because, underneath, the kernel will be picking the
> `protection_map[VM_WRITE|VM_READ]` PTE encodings.
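
To make the userspace-visible effect concrete (an illustrative sketch of my
own, not part of the patch): a legacy mmap() caller asking for PROT_WRITE
alone should keep getting an ordinary readable/writable private mapping,
because the kernel upgrades the request to VM_READ | VM_WRITE underneath:

  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
          size_t len = getpagesize();

          /* PROT_WRITE only: with this series it is treated as
           * PROT_READ | PROT_WRITE internally, so the mapping stays
           * readable and writable; VM_WRITE alone now means shadow stack. */
          char *p = mmap(NULL, len, PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
          if (p == MAP_FAILED)
                  return 1;

          memcpy(p, "still works", 12);   /* write to the mapping */
          printf("%s\n", p);              /* and read it back */
          munmap(p, len);
          return 0;
  }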
>
> Signed-off-by: Deepak Gupta <debug@...osinc.com>
> ---
> arch/riscv/include/asm/mman.h | 24 ++++++++++++++++++++++++
> arch/riscv/include/asm/pgtable.h | 1 +
> arch/riscv/kernel/sys_riscv.c | 10 ++++++++++
> arch/riscv/mm/init.c | 2 +-
> mm/mmap.c | 1 +
> 5 files changed, 37 insertions(+), 1 deletion(-)
>
> diff --git a/arch/riscv/include/asm/mman.h b/arch/riscv/include/asm/mman.h
> new file mode 100644
> index 000000000000..ef9fedf32546
> --- /dev/null
> +++ b/arch/riscv/include/asm/mman.h
> @@ -0,0 +1,24 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef __ASM_MMAN_H__
> +#define __ASM_MMAN_H__
> +
> +#include <linux/compiler.h>
> +#include <linux/types.h>
> +#include <uapi/asm/mman.h>
> +
> +static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
> + unsigned long pkey __always_unused)
> +{
> + unsigned long ret = 0;
> +
> + /*
> + * If PROT_WRITE was specified, force it to VM_READ | VM_WRITE.
> + * Only VM_WRITE means shadow stack.
> + */
> + if (prot & PROT_WRITE)
> + ret = (VM_READ | VM_WRITE);
> + return ret;
> +}
> +#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
> +
> +#endif /* ! __ASM_MMAN_H__ */
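
(Aside, mostly for other readers of the series: the generic
calc_vm_prot_bits() in include/linux/mman.h ORs this arch hook into the
translated bits, roughly:

  static inline unsigned long
  calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
  {
          return _calc_vm_trans(prot, PROT_READ,  VM_READ) |
                 _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
                 _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
                 arch_calc_vm_prot_bits(prot, pkey);
  }

so returning VM_READ | VM_WRITE from the hook is enough to steer every
PROT_WRITE request onto protection_map[VM_WRITE | VM_READ].)
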
> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> index e79f15293492..4948a1f18ae8 100644
> --- a/arch/riscv/include/asm/pgtable.h
> +++ b/arch/riscv/include/asm/pgtable.h
> @@ -177,6 +177,7 @@ extern struct pt_alloc_ops pt_ops __meminitdata;
> #define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
> #define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | \
> _PAGE_EXEC | _PAGE_WRITE)
> +#define PAGE_SHADOWSTACK __pgprot(_PAGE_BASE | _PAGE_WRITE)
>
> #define PAGE_COPY PAGE_READ
> #define PAGE_COPY_EXEC PAGE_READ_EXEC
> diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
> index d77afe05578f..43a448bf254b 100644
> --- a/arch/riscv/kernel/sys_riscv.c
> +++ b/arch/riscv/kernel/sys_riscv.c
> @@ -7,6 +7,7 @@
>
> #include <linux/syscalls.h>
> #include <asm/cacheflush.h>
> +#include <asm-generic/mman-common.h>
>
> static long riscv_sys_mmap(unsigned long addr, unsigned long len,
> unsigned long prot, unsigned long flags,
> @@ -16,6 +17,15 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
> if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
> return -EINVAL;
>
> + /*
> + * If PROT_WRITE is specified then extend that to PROT_READ
> + * protection_map[VM_WRITE] is now going to select shadow stack encodings.
> + * So specifying PROT_WRITE actually should select protection_map [VM_WRITE | VM_READ]
> + * If user wants to create shadow stack then they should use `map_shadow_stack` syscall.
> + */
> + if (unlikely((prot & PROT_WRITE) && !(prot & PROT_READ)))
> + prot |= PROT_READ;
> +
> return ksys_mmap_pgoff(addr, len, prot, flags, fd,
> offset >> (PAGE_SHIFT - page_shift_offset));
> }
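
For completeness, the intended way to get a VM_WRITE-only (shadow stack)
mapping from userspace is then the dedicated syscall rather than mmap().
A hypothetical sketch, assuming the uapi from the existing x86 shadow
stack support (__NR_map_shadow_stack, SHADOW_STACK_SET_TOKEN) carries
over unchanged to riscv:

  #define _GNU_SOURCE
  #include <unistd.h>
  #include <sys/syscall.h>

  #ifndef __NR_map_shadow_stack
  #define __NR_map_shadow_stack 453
  #endif
  #ifndef SHADOW_STACK_SET_TOKEN
  #define SHADOW_STACK_SET_TOKEN (1UL << 0)
  #endif

  /* Ask the kernel for a shadow stack mapping with a restore token at
   * the top; returns the base address, or -1 cast to a pointer on error. */
  static void *alloc_shadow_stack(unsigned long size)
  {
          return (void *)syscall(__NR_map_shadow_stack, 0UL, size,
                                 SHADOW_STACK_SET_TOKEN);
  }
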
> diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
> index 0e8c20adcd98..964810aeb405 100644
> --- a/arch/riscv/mm/init.c
> +++ b/arch/riscv/mm/init.c
> @@ -326,7 +326,7 @@ pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
> static const pgprot_t protection_map[16] = {
> [VM_NONE] = PAGE_NONE,
> [VM_READ] = PAGE_READ,
> - [VM_WRITE] = PAGE_COPY,
> + [VM_WRITE] = PAGE_SHADOWSTACK,
> [VM_WRITE | VM_READ] = PAGE_COPY,
> [VM_EXEC] = PAGE_EXEC,
> [VM_EXEC | VM_READ] = PAGE_READ_EXEC,
> diff --git a/mm/mmap.c b/mm/mmap.c
> index dd4b35a25aeb..b56f1e8cbfc6 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -47,6 +47,7 @@
> #include <linux/oom.h>
> #include <linux/sched/mm.h>
> #include <linux/ksm.h>
> +#include <linux/processor.h>
This seems benign enough; I just wonder why you need it?
>
> #include <linux/uaccess.h>
> #include <asm/cacheflush.h>
>
> --
> 2.45.0
>