Date:   Mon, 20 Jul 2020 09:08:26 -0400
From:   qianjun.kernel@...il.com
To:     tglx@...utronix.de, peterz@...radead.org, will@...nel.org,
        luto@...nel.org, linux-kernel@...r.kernel.org, urezki@...il.com
Cc:     laoar.shao@...il.com, jun qian <qianjun.kernel@...il.com>
Subject: [RFC PATCH v2] softirq: avoid large sched delay from pending softirqs

From: jun qian <qianjun.kernel@...il.com>

When there are pending softirqs, __do_softirq() processes all of them
in its while loop. If handling the pending softirqs takes more than
2 msec in this loop, or one of the softirq actions runs for a long
time, the original code still processes all the pending softirqs
without waking up ksoftirqd, which causes a relatively large
scheduling delay on the corresponding CPU, which we do not wish to
see. This patch checks the total time spent processing pending
softirqs; if it exceeds 2 ms, we wake up ksoftirqd to avoid a large
scheduling delay.
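
The idea can be sketched outside the kernel as a small userspace
program (only an illustration of the elapsed-time budget, not the
actual kernel code; do_one_work_item() and the 16-item queue are
made-up placeholders standing in for softirq actions):

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

#define MAX_BUDGET_US 2000	/* 2 ms, mirroring MAX_SOFTIRQ_TIME */

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

static void do_one_work_item(void)
{
	/* Pretend each item takes roughly 500us of work. */
	struct timespec d = { .tv_sec = 0, .tv_nsec = 500 * 1000 };

	nanosleep(&d, NULL);
}

int main(void)
{
	long long start = now_us();
	int i;

	for (i = 0; i < 16; i++) {
		do_one_work_item();
		/*
		 * Same shape as the patch below: keep working while
		 * within the budget, otherwise stop and defer the rest
		 * (in the kernel, by letting ksoftirqd take over).
		 */
		if (now_us() - start > MAX_BUDGET_US) {
			printf("budget exceeded after %d items, deferring the rest\n",
			       i + 1);
			break;
		}
	}
	return 0;
}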

Signed-off-by: jun qian <qianjun.kernel@...il.com>
---
 kernel/softirq.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/kernel/softirq.c b/kernel/softirq.c
index c4201b7f..f8e5be9 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -210,7 +210,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
  * we want to handle softirqs as soon as possible, but they
  * should not be able to lock up the box.
  */
-#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
+#define MAX_SOFTIRQ_TIME 2000	/* In microseconds */
 #define MAX_SOFTIRQ_RESTART 10
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -248,7 +248,8 @@ static inline void lockdep_softirq_end(bool in_hardirq) { }
 
 asmlinkage __visible void __softirq_entry __do_softirq(void)
 {
-	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
+	ktime_t end, start;
+	s64 delta;
 	unsigned long old_flags = current->flags;
 	int max_restart = MAX_SOFTIRQ_RESTART;
 	struct softirq_action *h;
@@ -256,6 +257,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	__u32 pending;
 	int softirq_bit;
 
+	start = ktime_get();
+
 	/*
 	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
 	 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
@@ -299,6 +302,15 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 		}
 		h++;
 		pending >>= softirq_bit;
+
+		end = ktime_get();
+		delta = ktime_to_us(end - start);
+		/*
+		 * The softirq actions have been running for too much time,
+		 * so we may need to wake up ksoftirqd.
+		 */
+		if (delta > MAX_SOFTIRQ_TIME && need_resched())
+			break;
 	}
 
 	if (__this_cpu_read(ksoftirqd) == current)
@@ -307,7 +319,9 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 
 	pending = local_softirq_pending();
 	if (pending) {
-		if (time_before(jiffies, end) && !need_resched() &&
+		end = ktime_get();
+		delta = ktime_to_us(end - start);
+		if (delta < MAX_SOFTIRQ_TIME && !need_resched() &&
 		    --max_restart)
 			goto restart;
 
-- 
1.8.3.1
