Message-Id: <1629689063-21252-1-git-send-email-wangqing@vivo.com>
Date:   Mon, 23 Aug 2021 11:24:19 +0800
From:   Wang Qing <wangqing@...o.com>
To:     Thomas Gleixner <tglx@...utronix.de>,
        Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...nel.org>,
        Randy Dunlap <rdunlap@...radead.org>,
        Wang Qing <wangqing@...o.com>, Hugh Dickins <hughd@...gle.com>,
        Michal Hocko <mhocko@...nel.org>,
        Frederic Weisbecker <frederic@...nel.org>,
        Davidlohr Bueso <dave@...olabs.net>,
        Will Deacon <will@...nel.org>,
        Dirk Behme <dirk.behme@...bosch.com>,
        linux-kernel@...r.kernel.org
Subject: [PATCH] softirq: Introduce SOFTIRQ_FORCED_THREADING

At present, whether softirqs are processed on interrupt exit is
controlled by IRQ_FORCED_THREADING. This conflates two separate
decisions: forced threading of hardirq handlers and forced threading
of softirqs. The softirq behavior should be split out into its own
option so that it can take effect independently.
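
For reference, the mainline gate this refers to looks roughly like the
following (simplified from kernel/softirq.c; force_irqthreads is the
runtime flag set by the "threadirqs" boot parameter):

	static inline void invoke_softirq(void)
	{
		if (ksoftirqd_running(local_softirq_pending()))
			return;

		/* force_irqthreads also decides where softirqs run */
		if (!force_irqthreads || !__this_cpu_read(ksoftirqd))
			__do_softirq();		/* on interrupt exit */
		else
			wakeup_softirqd();	/* deferred to ksoftirqd */
	}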

At the same time, when softirqs are no longer allowed to run on
interrupt exit, ksoftirqd becomes the only place they are processed,
so its priority should be raised. This follows the PREEMPT_RT
implementation, which I consider reasonable.
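
For comparison, a minimal sketch of the PREEMPT_RT-style setup this
mirrors; sched_set_fifo_low() is the mainline helper that requests
SCHED_FIFO priority 1 (whether the RT tree uses exactly this helper
at this spot is an assumption):

	static void ksoftirqd_set_sched_params(unsigned int cpu)
	{
		/* lowest RT priority: above CFS tasks, below other RT tasks */
		sched_set_fifo_low(current);
	}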

Signed-off-by: Wang Qing <wangqing@...o.com>
---
 kernel/Kconfig.preempt | 10 ++++++++++
 kernel/irq/Kconfig     |  1 +
 kernel/softirq.c       | 21 ++++++++++++++++++++-
 3 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 5876e30..42d60e7
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -60,6 +60,7 @@ config PREEMPT_RT
 	bool "Fully Preemptible Kernel (Real-Time)"
 	depends on EXPERT && ARCH_SUPPORTS_RT
 	select PREEMPTION
+	select SOFTIRQ_FORCED_THREADING
 	help
 	  This option turns the kernel into a real-time kernel by replacing
 	  various locking primitives (spinlocks, rwlocks, etc.) with
@@ -118,4 +119,13 @@ config SCHED_CORE
 	  which is the likely usage by Linux distributions, there should
 	  be no measurable impact on performance.
 
+config SOFTIRQ_FORCED_THREADING
+	bool "Force softirq processing into ksoftirqd"
+	help
+	  This option forces softirqs to be executed in ksoftirqd instead
+	  of on interrupt exit, and turns ksoftirqd into a real-time
+	  (SCHED_FIFO) process.

+	  This spreads softirq processing more evenly across task context
+	  and reduces the worst-case scheduling latency that softirq
+	  execution on interrupt exit can impose on RT tasks.
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index fbc54c2..ecd3236
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -111,7 +111,8 @@ config GENERIC_IRQ_RESERVATION_MODE
 
 # Support forced irq threading
 config IRQ_FORCED_THREADING
        bool
+	select SOFTIRQ_FORCED_THREADING
 
 config SPARSE_IRQ
 	bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f3a0121..f02f0d9
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,6 +29,7 @@
 #include <linux/wait_bit.h>
 
 #include <asm/softirq_stack.h>
+#include <uapi/linux/sched/types.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
@@ -417,12 +418,18 @@ static inline bool should_wake_ksoftirqd(void)
 	return true;
 }
 
+#ifdef CONFIG_SOFTIRQ_FORCED_THREADING
+static inline void invoke_softirq(void)
+{
+	wakeup_softirqd();
+}
+#else
 static inline void invoke_softirq(void)
 {
 	if (ksoftirqd_running(local_softirq_pending()))
 		return;
 
-	if (!force_irqthreads || !__this_cpu_read(ksoftirqd)) {
+	if (!__this_cpu_read(ksoftirqd)) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
 		 * We can safely execute softirq on the current stack if
@@ -442,6 +449,7 @@ static inline void invoke_softirq(void)
 		wakeup_softirqd();
 	}
 }
+#endif
 
 asmlinkage __visible void do_softirq(void)
 {
@@ -909,6 +917,14 @@ static int ksoftirqd_should_run(unsigned int cpu)
 	return local_softirq_pending();
 }
 
+#ifdef CONFIG_SOFTIRQ_FORCED_THREADING
+static void ksoftirqd_set_sched_params(unsigned int cpu)
+{
+	struct sched_param param = { .sched_priority = 1 };
+	sched_setscheduler(current, SCHED_FIFO, &param);
+}
+#endif
+
 static void run_ksoftirqd(unsigned int cpu)
 {
 	ksoftirqd_run_begin();
@@ -957,6 +973,9 @@ static int takeover_tasklets(unsigned int cpu)
 
 static struct smp_hotplug_thread softirq_threads = {
 	.store			= &ksoftirqd,
+#ifdef CONFIG_SOFTIRQ_FORCED_THREADING
+	.setup			= ksoftirqd_set_sched_params,
+#endif
 	.thread_should_run	= ksoftirqd_should_run,
 	.thread_fn		= run_ksoftirqd,
 	.thread_comm		= "ksoftirqd/%u",
-- 
2.7.4
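
As a quick sanity check with SOFTIRQ_FORCED_THREADING enabled, a small
userspace program along these lines can confirm that a ksoftirqd thread
ended up with the SCHED_FIFO policy (pass the thread's pid, e.g. taken
from ps; this snippet is illustrative and not part of the patch):

	/* policy_check.c: print the scheduling policy of a given pid */
	#include <sched.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		if (argc != 2) {
			fprintf(stderr, "usage: %s <pid>\n", argv[0]);
			return 1;
		}

		int policy = sched_getscheduler((pid_t)atoi(argv[1]));
		if (policy == -1) {
			perror("sched_getscheduler");
			return 1;
		}

		printf("policy: %s\n",
		       policy == SCHED_FIFO ? "SCHED_FIFO" :
		       policy == SCHED_RR ? "SCHED_RR" : "other");
		return 0;
	}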
