Message-ID: <CAJF2gTSHH69B+KAOJdpLvQdGuhS1nJ+GnW2hVCb+e+8nrnaJ7Q@mail.gmail.com>
Date: Thu, 20 Oct 2022 10:16:47 +0800
From: Guo Ren <guoren@...nel.org>
To: Jisheng Zhang <jszhang@...nel.org>
Cc: Paul Walmsley <paul.walmsley@...ive.com>,
Palmer Dabbelt <palmer@...belt.com>,
Albert Ou <aou@...s.berkeley.edu>,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] riscv: fix race when vmap stack overflow
On Wed, Oct 19, 2022 at 11:57 PM Jisheng Zhang <jszhang@...nel.org> wrote:
>
> Currently, when detecting a vmap stack overflow, riscv first switches
> to the so-called shadow stack, then uses this shadow stack to call
> get_overflow_stack() to get the overflow stack. However, there's
> a race here if two or more harts use the same shadow stack at the same
> time.
>
> To solve this race, we introduce the spin_shadow_stack atomic variable,
> which serializes use of the shadow stack.
>
> Fixes: 31da94c25aea ("riscv: add VMAP_STACK overflow detection")
> Signed-off-by: Jisheng Zhang <jszhang@...nel.org>
> Suggested-by: Guo Ren <guoren@...nel.org>
> ---
> arch/riscv/kernel/entry.S | 4 ++++
> arch/riscv/kernel/traps.c | 4 ++++
> 2 files changed, 8 insertions(+)
>
> diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
> index b9eda3fcbd6d..7b924b16792b 100644
> --- a/arch/riscv/kernel/entry.S
> +++ b/arch/riscv/kernel/entry.S
> @@ -404,6 +404,10 @@ handle_syscall_trace_exit:
>
> #ifdef CONFIG_VMAP_STACK
> handle_kernel_stack_overflow:
> +1: la sp, spin_shadow_stack
> + amoswap.w sp, sp, (sp)
If CONFIG_64BIT=y, this would be broken: amoswap.w only holds the low
32 bits of sp, so the next loop would get a wrong value for
&spin_shadow_stack.
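As a side illustration (a user-space sketch only, with GCC's __atomic
builtins standing in for amoswap.w; not the proposed kernel code), a
32-bit swap only carries the low half of a 64-bit address, so the token
left in memory no longer matches &spin_shadow_stack:

/*
 * Hypothetical illustration only: model the 32-bit AMO with a plain
 * uint32_t and __atomic_exchange_n().
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t spin_word;	/* what a .w AMO actually updates */

int main(void)
{
	/* Example value standing in for a 64-bit &spin_shadow_stack. */
	uint64_t addr = 0xffffffff80001234ULL;

	/* Like "amoswap.w sp, sp, (sp)": only 32 bits reach memory. */
	uint32_t old = __atomic_exchange_n(&spin_word, (uint32_t)addr,
					   __ATOMIC_ACQUIRE);

	printf("previous token = 0x%x\n", old);
	printf("stored token   = 0x%x\n", spin_word);	/* truncated */
	printf("full address   = 0x%llx\n", (unsigned long long)addr);
	return 0;
}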
Here is the correction.
-----
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 1b471ff73178..acf563072b8b 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -23,6 +23,7 @@
#define REG_L __REG_SEL(ld, lw)
#define REG_S __REG_SEL(sd, sw)
#define REG_SC __REG_SEL(sc.d, sc.w)
+#define REG_AMOSWAP __REG_SEL(amoswap.d, amoswap.w)
#define REG_ASM __REG_SEL(.dword, .word)
#define SZREG __REG_SEL(8, 4)
#define LGREG __REG_SEL(3, 2)
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index b9eda3fcbd6d..ea6b78dac739 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -404,6 +404,10 @@ handle_syscall_trace_exit:
#ifdef CONFIG_VMAP_STACK
handle_kernel_stack_overflow:
+1: la sp, spin_shadow_stack
+ /* Reuse the address as the spin value, so it must be
+  * XLEN bits wide. */
+ REG_AMOSWAP sp, sp, (sp)
+ bnez sp, 1b
+
la sp, shadow_stack
addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index f3e96d60a2ff..9e6cc0d63833 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -221,11 +221,15 @@ asmlinkage unsigned long get_overflow_stack(void)
OVERFLOW_STACK_SIZE;
}
+unsigned long spin_shadow_stack = 0;
+
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
+ smp_store_release(&spin_shadow_stack, 0);
+
console_verbose();
pr_emerg("Insufficient stack space to handle exception!\n");
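For clarity, here is a minimal user-space model of the protocol the two
hunks implement together (a sketch only, using GCC __atomic builtins in
place of REG_AMOSWAP and smp_store_release): the entry path swaps a
non-zero token, the variable's own address, into spin_shadow_stack and
spins until it reads back zero; once the hart has moved off the shadow
stack, handle_bad_stack() releases it by storing zero.

/* Sketch only; not kernel code. */
static unsigned long spin_shadow_stack;

static void shadow_stack_acquire(void)
{
	/*
	 * Swap in a non-zero token (the variable's own address, naturally
	 * XLEN wide) and spin until the previous value was 0.
	 */
	while (__atomic_exchange_n(&spin_shadow_stack,
				   (unsigned long)&spin_shadow_stack,
				   __ATOMIC_ACQUIRE) != 0)
		;
}

static void shadow_stack_release(void)
{
	/* Pairs with the acquire swap; lets the next hart take the stack. */
	__atomic_store_n(&spin_shadow_stack, 0UL, __ATOMIC_RELEASE);
}

int main(void)
{
	shadow_stack_acquire();
	/* ... exclusive use of the shared shadow stack ... */
	shadow_stack_release();
	return 0;
}

Reusing the address as the token keeps the asm to a single la plus the
swap, and makes the value XLEN wide by construction.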
> + bnez sp, 1b
> +
> la sp, shadow_stack
> addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
>
> diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
> index f3e96d60a2ff..88a54947dffb 100644
> --- a/arch/riscv/kernel/traps.c
> +++ b/arch/riscv/kernel/traps.c
> @@ -221,11 +221,15 @@ asmlinkage unsigned long get_overflow_stack(void)
> OVERFLOW_STACK_SIZE;
> }
>
> +atomic_t spin_shadow_stack;
> +
> asmlinkage void handle_bad_stack(struct pt_regs *regs)
> {
> unsigned long tsk_stk = (unsigned long)current->stack;
> unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
>
> + atomic_set_release(&spin_shadow_stack, 0);
> +
> console_verbose();
>
> pr_emerg("Insufficient stack space to handle exception!\n");
> --
> 2.37.2
>
--
Best Regards
Guo Ren