Message-Id: <1516077640-19718-2-git-send-email-frederic@kernel.org>
Date: Tue, 16 Jan 2018 05:40:36 +0100
From: Frederic Weisbecker <frederic@...nel.org>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
Levin Alexander <alexander.levin@...izon.com>,
Peter Zijlstra <peterz@...radead.org>,
Mauro Carvalho Chehab <mchehab@...pensource.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Hannes Frederic Sowa <hannes@...essinduktion.org>,
"Paul E . McKenney" <paulmck@...ux.vnet.ibm.com>,
Wanpeng Li <wanpeng.li@...mail.com>,
Dmitry Safonov <dima@...sta.com>,
Thomas Gleixner <tglx@...utronix.de>,
Eric Dumazet <edumazet@...gle.com>,
Radu Rendec <rrendec@...sta.com>,
Ingo Molnar <mingo@...nel.org>,
Stanislaw Gruszka <sgruszka@...hat.com>,
Paolo Abeni <pabeni@...hat.com>,
Rik van Riel <riel@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
David Miller <davem@...emloft.net>
Subject: [RFC PATCH 1/5] softirq: Account time and iteration stats per vector
As we plan to defer the processing of specific softirq vectors to
workqueues when those vectors need more time than IRQ tail can offer,
let's first introduce a per-vector call counter and limit.

Each softirq vector is allowed to run on IRQ tail at most
MAX_SOFTIRQ_RESTART times per jiffy. Once that limit is reached,
softirq processing is deferred to ksoftirqd. The threading will be
split into per-vector worklets in later patches.
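
For illustration, here is a minimal user-space sketch of the per-jiffy
accounting added below. This is not the kernel code: it uses a plain
array instead of per-CPU data, a simple comparison instead of
time_before(), and a fake jiffies argument; names are illustrative.

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_SOFTIRQS		10
	#define MAX_SOFTIRQ_RESTART	20

	struct vector {
		unsigned int jiffy_calls;	/* calls seen during the current jiffy */
		unsigned long jiffy_snap;	/* jiffy at which the counter was last reset */
	};

	static struct vector vectors[NR_SOFTIRQS];

	/* Return true once vec_nr has exceeded its per-jiffy call budget. */
	static bool vector_overrun(unsigned int vec_nr, unsigned long now)
	{
		struct vector *v = &vectors[vec_nr];

		if (v->jiffy_snap != now) {
			/* New jiffy: reset the counter. */
			v->jiffy_calls = 0;
			v->jiffy_snap = now;
		}

		return ++v->jiffy_calls > MAX_SOFTIRQ_RESTART;
	}

	int main(void)
	{
		unsigned long jiffy = 0;

		/* 30 calls to vector 3 within the same jiffy: the last 10 overrun. */
		for (int i = 0; i < 30; i++) {
			if (vector_overrun(3, jiffy))
				printf("call %d: vector 3 overran, defer to ksoftirqd\n", i + 1);
		}

		return 0;
	}

With HZ=1000, for example, this budget amounts to at most 20 runs of a
given vector per millisecond of IRQ tail before it is pushed to
ksoftirqd.
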
Suggested-by: Linus Torvalds <torvalds@...ux-foundation.org>
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Cc: Dmitry Safonov <dima@...sta.com>
Cc: Eric Dumazet <edumazet@...gle.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: David Miller <davem@...emloft.net>
Cc: Hannes Frederic Sowa <hannes@...essinduktion.org>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Levin Alexander <alexander.levin@...izon.com>
Cc: Paolo Abeni <pabeni@...hat.com>
Cc: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
Cc: Radu Rendec <rrendec@...sta.com>
Cc: Rik van Riel <riel@...hat.com>
Cc: Stanislaw Gruszka <sgruszka@...hat.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Wanpeng Li <wanpeng.li@...mail.com>
Cc: Mauro Carvalho Chehab <mchehab@...pensource.com>
---
kernel/softirq.c | 41 ++++++++++++++++++++++++++---------------
1 file changed, 26 insertions(+), 15 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 2f5e87f..e0f4b29 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -62,6 +62,15 @@ const char * const softirq_to_name[NR_SOFTIRQS] = {
"TASKLET", "SCHED", "HRTIMER", "RCU"
};
+struct vector {
+ unsigned int jiffy_calls;
+ unsigned long jiffy_snap;
+};
+
+static DEFINE_PER_CPU(struct vector, vector_cpu[NR_SOFTIRQS]) = {
+ [0 ... NR_SOFTIRQS-1] = { 0, INITIAL_JIFFIES }
+};
+
/*
* we cannot loop indefinitely here to avoid userspace starvation,
* but we also don't want to introduce a worst case 1/HZ latency
@@ -192,19 +201,13 @@ EXPORT_SYMBOL(__local_bh_enable_ip);
/*
* We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
- * but break the loop if need_resched() is set or after 2 ms.
- * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
- * certain cases, such as stop_machine(), jiffies may cease to
- * increment and so we need the MAX_SOFTIRQ_RESTART limit as
- * well to make sure we eventually return from this method.
- *
+ * but break the loop if need_resched() is set.
* These limits have been established via experimentation.
* The two things to balance is latency against fairness -
* we want to handle softirqs as soon as possible, but they
* should not be able to lock up the box.
*/
-#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
-#define MAX_SOFTIRQ_RESTART 10
+#define MAX_SOFTIRQ_RESTART 20
#ifdef CONFIG_TRACE_IRQFLAGS
/*
@@ -241,12 +244,10 @@ static inline void lockdep_softirq_end(bool in_hardirq) { }
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
- unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
- int max_restart = MAX_SOFTIRQ_RESTART;
struct softirq_action *h;
bool in_hardirq;
- __u32 pending;
+ __u32 pending, overrun = 0;
int softirq_bit;
/*
@@ -271,6 +272,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
h = softirq_vec;
while ((softirq_bit = ffs(pending))) {
+ struct vector *vector;
unsigned int vec_nr;
int prev_count;
@@ -284,6 +286,16 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
trace_softirq_entry(vec_nr);
h->action(h);
trace_softirq_exit(vec_nr);
+
+ vector = this_cpu_ptr(&vector_cpu[vec_nr]);
+ if (time_before(vector->jiffy_snap, jiffies)) {
+ vector->jiffy_calls = 0;
+ vector->jiffy_snap = jiffies;
+ }
+
+ if (++vector->jiffy_calls > MAX_SOFTIRQ_RESTART)
+ overrun |= 1 << vec_nr;
+
if (unlikely(prev_count != preempt_count())) {
pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
vec_nr, softirq_to_name[vec_nr], h->action,
@@ -299,11 +311,10 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
pending = local_softirq_pending();
if (pending) {
- if (time_before(jiffies, end) && !need_resched() &&
- --max_restart)
+ if (overrun || need_resched())
+ wakeup_softirqd();
+ else
goto restart;
-
- wakeup_softirqd();
}
lockdep_softirq_end(in_hardirq);
--
2.7.4