Message-Id: <20230614013018.2168426-3-guoren@kernel.org>
Date: Tue, 13 Jun 2023 21:30:17 -0400
From: guoren@...nel.org
To: arnd@...db.de, guoren@...nel.org, palmer@...osinc.com,
conor.dooley@...rochip.com, heiko@...ech.de, jszhang@...nel.org,
bjorn@...nel.org, cleger@...osinc.com
Cc: linux-arch@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-riscv@...ts.infradead.org, Guo Ren <guoren@...ux.alibaba.com>
Subject: [PATCH -next V13 2/3] riscv: stack: Support HAVE_SOFTIRQ_ON_OWN_STACK
From: Guo Ren <guoren@...ux.alibaba.com>
Add HAVE_SOFTIRQ_ON_OWN_STACK support to the IRQ_STACKS config: softirq
handling now runs on the same per-CPU irq_stack that hardirqs use. When a
softirq is raised while on the task stack, do_softirq_own_stack() switches
to the per-CPU irq_stack before calling __do_softirq().
Tested-by: Jisheng Zhang <jszhang@...nel.org>
Signed-off-by: Guo Ren <guoren@...ux.alibaba.com>
Signed-off-by: Guo Ren <guoren@...nel.org>
---
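For context: with HAVE_SOFTIRQ_ON_OWN_STACK selected, the generic softirq
code calls the arch-provided do_softirq_own_stack() instead of running
__do_softirq() directly on whatever stack happens to be current. A rough
sketch of that plumbing (paraphrased from include/asm-generic/softirq_stack.h,
not copied verbatim):

#if defined(CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK) && !defined(CONFIG_PREEMPT_RT)
/* Provided by the architecture; this patch adds the riscv version, which
 * switches to the per-CPU irq_stack before running softirqs. */
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	/* No dedicated stack: run softirqs on the current (task) stack. */
	__do_softirq();
}
#endif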
arch/riscv/Kconfig | 6 ++++--
arch/riscv/kernel/irq.c | 35 +++++++++++++++++++++++++++++++++++
2 files changed, 39 insertions(+), 2 deletions(-)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index a8368fe7be14..f515cb101c19 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -591,11 +591,13 @@ config FPU
 	  If you don't know what to do here, say Y.
 
 config IRQ_STACKS
-	bool "Independent irq stacks" if EXPERT
+	bool "Independent irq & softirq stacks" if EXPERT
 	default y
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
+	select HAVE_SOFTIRQ_ON_OWN_STACK
 	help
-	  Add independent irq stacks for percpu to prevent kernel stack overflows.
+	  Add independent per-CPU irq & softirq stacks to prevent kernel stack
+	  overflows. Disabling IRQ_STACKS saves some memory.
 
 endmenu # "Platform type"
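As the Kconfig help above notes, the dedicated stacks can be traded for a
smaller memory footprint. In a .config or defconfig fragment that is simply
(standard Kconfig syntax, shown only for illustration):

# CONFIG_IRQ_STACKS is not set

With the option off, HAVE_SOFTIRQ_ON_OWN_STACK is no longer selected and
softirqs fall back to running on the current stack.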
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index a1dcf8e43b3c..d0577cc6a081 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -11,6 +11,9 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <asm/sbi.h>
+#include <asm/smp.h>
+#include <asm/softirq_stack.h>
+#include <asm/stacktrace.h>
 
 static struct fwnode_handle *(*__get_intc_node)(void);
@@ -56,6 +59,38 @@ static void init_irq_stacks(void)
 		per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
 }
 #endif /* CONFIG_VMAP_STACK */
+
+#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
+void do_softirq_own_stack(void)
+{
+#ifdef CONFIG_IRQ_STACKS
+	if (on_thread_stack()) {
+		ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
+					+ IRQ_STACK_SIZE/sizeof(ulong);
+		__asm__ __volatile(
+		"addi	sp, sp, -"RISCV_SZPTR  "\n"
+		REG_S"  ra, (sp)		\n"
+		"addi	sp, sp, -"RISCV_SZPTR  "\n"
+		REG_S"  s0, (sp)		\n"
+		"addi	s0, sp, 2*"RISCV_SZPTR "\n"
+		"move	sp, %[sp]		\n"
+		"call	__do_softirq		\n"
+		"addi	sp, s0, -2*"RISCV_SZPTR"\n"
+		REG_L"  s0, (sp)		\n"
+		"addi	sp, sp, "RISCV_SZPTR   "\n"
+		REG_L"  ra, (sp)		\n"
+		"addi	sp, sp, "RISCV_SZPTR   "\n"
+		:
+		: [sp] "r" (sp)
+		: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
+		  "t0", "t1", "t2", "t3", "t4", "t5", "t6",
+		  "memory");
+	} else
+#endif
+		__do_softirq();
+}
+#endif /* CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK */
+
 #else
 static void init_irq_stacks(void) {}
 #endif /* CONFIG_IRQ_STACKS */
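The inline asm above hand-rolls the stack switch: it saves ra and s0, sets up
a frame pointer so backtraces can cross the switch, points sp at the top of
the per-CPU irq_stack, calls __do_softirq(), and then restores sp, s0 and ra.
For readers less used to that pattern, here is a rough user-space analogy (a
sketch only, not kernel code; fake_do_softirq and OWN_STACK_SIZE are made-up
names) that uses POSIX ucontext to run a function on a dedicated stack and
return to the original one:

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define OWN_STACK_SIZE	(64 * 1024)	/* stand-in for IRQ_STACK_SIZE */

static ucontext_t main_ctx, own_ctx;

static void fake_do_softirq(void)
{
	int marker;

	/* Plays the role of __do_softirq(): runs on the dedicated stack. */
	printf("softirq work, stack near %p\n", (void *)&marker);
}

int main(void)
{
	void *stack = malloc(OWN_STACK_SIZE);
	int marker;

	if (!stack)
		return 1;

	getcontext(&own_ctx);
	own_ctx.uc_stack.ss_sp = stack;
	own_ctx.uc_stack.ss_size = OWN_STACK_SIZE;
	own_ctx.uc_link = &main_ctx;		/* return here when the function ends */
	makecontext(&own_ctx, fake_do_softirq, 0);

	printf("task work, stack near %p\n", (void *)&marker);
	swapcontext(&main_ctx, &own_ctx);	/* switch sp, call, switch back */
	printf("back on the original stack\n");

	free(stack);
	return 0;
}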
--
2.36.1