Message-ID: <ZkJcB5u+0bZ2KsS+@debug.ba.rivosinc.com>
Date: Mon, 13 May 2024 11:29:27 -0700
From: Deepak Gupta <debug@...osinc.com>
To: Alexandre Ghiti <alex@...ti.fr>
Cc: paul.walmsley@...ive.com, rick.p.edgecombe@...el.com,
broonie@...nel.org, Szabolcs.Nagy@....com, kito.cheng@...ive.com,
keescook@...omium.org, ajones@...tanamicro.com,
conor.dooley@...rochip.com, cleger@...osinc.com,
atishp@...shpatra.org, bjorn@...osinc.com, alexghiti@...osinc.com,
samuel.holland@...ive.com, conor@...nel.org,
linux-doc@...r.kernel.org, linux-riscv@...ts.infradead.org,
linux-kernel@...r.kernel.org, devicetree@...r.kernel.org,
linux-mm@...ck.org, linux-arch@...r.kernel.org,
linux-kselftest@...r.kernel.org, corbet@....net, palmer@...belt.com,
aou@...s.berkeley.edu, robh+dt@...nel.org,
krzysztof.kozlowski+dt@...aro.org, oleg@...hat.com,
akpm@...ux-foundation.org, arnd@...db.de, ebiederm@...ssion.com,
Liam.Howlett@...cle.com, vbabka@...e.cz, lstoakes@...il.com,
shuah@...nel.org, brauner@...nel.org, andy.chiu@...ive.com,
jerry.shih@...ive.com, hankuan.chen@...ive.com,
greentime.hu@...ive.com, evan@...osinc.com, xiao.w.wang@...el.com,
charlie@...osinc.com, apatel@...tanamicro.com,
mchitale@...tanamicro.com, dbarboza@...tanamicro.com,
sameo@...osinc.com, shikemeng@...weicloud.com, willy@...radead.org,
vincent.chen@...ive.com, guoren@...nel.org, samitolvanen@...gle.com,
songshuaishuai@...ylab.org, gerg@...nel.org, heiko@...ech.de,
bhe@...hat.com, jeeheng.sia@...rfivetech.com, cyy@...self.name,
maskray@...gle.com, ancientmodern4@...il.com,
mathis.salmen@...sal.de, cuiyunhui@...edance.com,
bgray@...ux.ibm.com, mpe@...erman.id.au, baruch@...s.co.il,
alx@...nel.org, david@...hat.com, catalin.marinas@....com,
revest@...omium.org, josh@...htriplett.org, shr@...kernel.io,
deller@....de, omosnace@...hat.com, ojeda@...nel.org,
jhubbard@...dia.com
Subject: Re: [PATCH v3 10/29] riscv/mm : ensure PROT_WRITE leads to VM_READ |
VM_WRITE
On Sun, May 12, 2024 at 06:24:45PM +0200, Alexandre Ghiti wrote:
>Hi Deepak,
>
>On 04/04/2024 01:34, Deepak Gupta wrote:
>>`arch_calc_vm_prot_bits` is implemented on risc-v to return VM_READ |
>>VM_WRITE if PROT_WRITE is specified. Similarly `riscv_sys_mmap` is
>>updated to convert all incoming PROT_WRITE to (PROT_WRITE | PROT_READ).
>>This is to make sure that any existing apps using PROT_WRITE still work.
>>
>>Earlier `protection_map[VM_WRITE]` used to pick read-write PTE encodings.
>>Now `protection_map[VM_WRITE]` will always pick PAGE_SHADOWSTACK PTE
>>encodings for shadow stack. The above changes ensure that existing apps
>>continue to work because, underneath, the kernel will pick
>>`protection_map[VM_WRITE|VM_READ]` PTE encodings.
>>
>>Signed-off-by: Deepak Gupta <debug@...osinc.com>
>>---
>> arch/riscv/include/asm/mman.h | 24 ++++++++++++++++++++++++
>> arch/riscv/include/asm/pgtable.h | 1 +
>> arch/riscv/kernel/sys_riscv.c | 11 +++++++++++
>> arch/riscv/mm/init.c | 2 +-
>> mm/mmap.c | 1 +
>> 5 files changed, 38 insertions(+), 1 deletion(-)
>> create mode 100644 arch/riscv/include/asm/mman.h
>>
>>diff --git a/arch/riscv/include/asm/mman.h b/arch/riscv/include/asm/mman.h
>>new file mode 100644
>>index 000000000000..ef9fedf32546
>>--- /dev/null
>>+++ b/arch/riscv/include/asm/mman.h
>>@@ -0,0 +1,24 @@
>>+/* SPDX-License-Identifier: GPL-2.0 */
>>+#ifndef __ASM_MMAN_H__
>>+#define __ASM_MMAN_H__
>>+
>>+#include <linux/compiler.h>
>>+#include <linux/types.h>
>>+#include <uapi/asm/mman.h>
>>+
>>+static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
>>+ unsigned long pkey __always_unused)
>>+{
>>+ unsigned long ret = 0;
>>+
>>+ /*
>>+ * If PROT_WRITE was specified, force it to VM_READ | VM_WRITE.
>>+ * Only VM_WRITE means shadow stack.
>>+ */
>>+ if (prot & PROT_WRITE)
>>+ ret = (VM_READ | VM_WRITE);
>>+ return ret;
>>+}
>>+#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
>>+
>>+#endif /* ! __ASM_MMAN_H__ */
>>diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
>>index 6066822e7396..4d5983bc6766 100644
>>--- a/arch/riscv/include/asm/pgtable.h
>>+++ b/arch/riscv/include/asm/pgtable.h
>>@@ -184,6 +184,7 @@ extern struct pt_alloc_ops pt_ops __initdata;
>> #define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
>> #define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | \
>> _PAGE_EXEC | _PAGE_WRITE)
>>+#define PAGE_SHADOWSTACK __pgprot(_PAGE_BASE | _PAGE_WRITE)
>> #define PAGE_COPY PAGE_READ
>> #define PAGE_COPY_EXEC PAGE_READ_EXEC
>>diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
>>index f1c1416a9f1e..846c36b1b3d5 100644
>>--- a/arch/riscv/kernel/sys_riscv.c
>>+++ b/arch/riscv/kernel/sys_riscv.c
>>@@ -8,6 +8,8 @@
>> #include <linux/syscalls.h>
>> #include <asm/cacheflush.h>
>> #include <asm-generic/mman-common.h>
>>+#include <vdso/vsyscall.h>
>>+#include <asm/mman.h>
>> static long riscv_sys_mmap(unsigned long addr, unsigned long len,
>> unsigned long prot, unsigned long flags,
>>@@ -17,6 +19,15 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
>> if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
>> return -EINVAL;
>>+	/*
>>+	 * If only PROT_WRITE is specified, extend that to PROT_READ.
>>+	 * protection_map[VM_WRITE] now selects shadow stack encodings, so
>>+	 * specifying PROT_WRITE should select protection_map[VM_WRITE | VM_READ].
>>+	 * If the user wants a shadow stack, they should use the `map_shadow_stack` syscall.
>>+	 */
>>+ if (unlikely((prot & PROT_WRITE) && !(prot & PROT_READ)))
>>+ prot |= PROT_READ;
>>+
>> return ksys_mmap_pgoff(addr, len, prot, flags, fd,
>> offset >> (PAGE_SHIFT - page_shift_offset));
>> }
>>diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
>>index fa34cf55037b..98e5ece4052a 100644
>>--- a/arch/riscv/mm/init.c
>>+++ b/arch/riscv/mm/init.c
>>@@ -299,7 +299,7 @@ pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
>> static const pgprot_t protection_map[16] = {
>> [VM_NONE] = PAGE_NONE,
>> [VM_READ] = PAGE_READ,
>>- [VM_WRITE] = PAGE_COPY,
>>+ [VM_WRITE] = PAGE_SHADOWSTACK,
>> [VM_WRITE | VM_READ] = PAGE_COPY,
>> [VM_EXEC] = PAGE_EXEC,
>> [VM_EXEC | VM_READ] = PAGE_READ_EXEC,
>>diff --git a/mm/mmap.c b/mm/mmap.c
>>index d89770eaab6b..57a974f49b00 100644
>>--- a/mm/mmap.c
>>+++ b/mm/mmap.c
>>@@ -47,6 +47,7 @@
>> #include <linux/oom.h>
>> #include <linux/sched/mm.h>
>> #include <linux/ksm.h>
>>+#include <linux/processor.h>
>> #include <linux/uaccess.h>
>> #include <asm/cacheflush.h>
>
>
>What happens if someone restricts the permission to PROT_WRITE using
>mprotect()? I would say this is an issue since it would turn the pages
>into shadow stack pages.
Look at this patch in the series:
"riscv/mm : ensure PROT_WRITE leads to VM_READ | VM_WRITE"
It implements `arch_calc_vm_prot_bits` for risc-v and enforces that incoming
PROT_WRITE is converted to VM_READ | VM_WRITE, so the mapping becomes plain
read/write memory. This way `mprotect` can be used to convert a shadow stack
page to read/write memory, but not regular memory to a shadow stack page.
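
For context, both mmap() and mprotect() translate the userspace prot bits
through calc_vm_prot_bits(), which ORs in arch_calc_vm_prot_bits(), so the
conversion above covers both paths. Below is a minimal userspace sketch
(illustration only, not part of this series) of the behavior expected on a
kernel with these patches applied: a bare PROT_WRITE request stays ordinary
read/write memory.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;

	/* PROT_WRITE alone: riscv_sys_mmap() extends it to
	 * PROT_READ | PROT_WRITE, so this is ordinary read/write
	 * memory, not a shadow stack page. */
	char *p = mmap(NULL, len, PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	strcpy(p, "hello");		/* writable */
	printf("%s\n", p);		/* and readable, no fault */

	/* mprotect() goes through the same translation: PROT_WRITE is
	 * turned into VM_READ | VM_WRITE by arch_calc_vm_prot_bits(). */
	if (mprotect(p, len, PROT_WRITE) == 0) {
		p[0] = 'H';
		printf("%s\n", p);	/* still readable and writable */
	}

	munmap(p, len);
	return 0;
}

A program like this should behave identically before and after the series;
that is the compatibility the riscv_sys_mmap() and arch_calc_vm_prot_bits()
changes are meant to preserve. Shadow stack pages are only created through
the map_shadow_stack() syscall.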