Message-ID: <20220406025241.191300-4-liaochang1@huawei.com>
Date:   Wed, 6 Apr 2022 10:52:41 +0800
From:   Liao Chang <liaochang1@...wei.com>
To:     <mcgrof@...nel.org>, <keescook@...omium.org>, <yzaikin@...gle.com>,
        <liaochang1@...wei.com>, <tglx@...utronix.de>, <clg@...d.org>,
        <nitesh@...hat.com>, <edumazet@...gle.com>, <peterz@...radead.org>,
        <joshdon@...gle.com>, <masahiroy@...nel.org>, <nathan@...nel.org>,
        <akpm@...ux-foundation.org>, <vbabka@...e.cz>,
        <gustavoars@...nel.org>, <arnd@...db.de>, <chris@...isdown.name>,
        <dmitry.torokhov@...il.com>, <linux@...musvillemoes.dk>,
        <daniel@...earbox.net>, <john.ogness@...utronix.de>,
        <will@...nel.org>, <dave@...olabs.net>, <frederic@...nel.org>
CC:     <linux-kernel@...r.kernel.org>, <linux-fsdevel@...r.kernel.org>,
        <heying24@...wei.com>, <guohanjun@...wei.com>,
        <weiyongjun1@...wei.com>
Subject: [RFC 3/3] softirq: Introduce statistics about softirq throttling

This patch adds accounting of the time spent executing softirqs on each
CPU and of the time softirq execution has been throttled on each CPU.
Both values are reported in /proc/softirqs, for example:

$ cat /proc/softirqs
                    CPU0       CPU1       CPU2       CPU3
          HI:          0          0          0          0
       TIMER:       1088        855        197       4862
      NET_TX:          0          0          0          0
      NET_RX:         15          1          0          0
       BLOCK:         14         11         86         75
    IRQ_POLL:          0          0          0          0
     TASKLET:    5926026    6133070    5646523    6149053
       SCHED:      18061      15939      15746      16004
     HRTIMER:          0          0          0          0
         RCU:        668        778        939        720
                    CPU0       CPU1       CPU2       CPU3
 DURATION_MS:      91556      69888      66784      73772
 THROTTLE_MS:      77820       7328       5828       8904

The row starting with "DURATION_MS:" shows how many milliseconds each
CPU has spent executing softirqs. The row starting with "THROTTLE_MS:"
shows how many milliseconds softirq throttling has lasted on each CPU.
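
For reference, the new rows can be consumed from user space like any
other line in /proc/softirqs. A minimal reader sketch (illustration
only, not part of this patch) that prints just the two rows:

/*
 * Illustration only, not part of this patch: print the DURATION_MS and
 * THROTTLE_MS rows added by this patch from /proc/softirqs.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *fp = fopen("/proc/softirqs", "r");

	if (!fp) {
		perror("/proc/softirqs");
		return 1;
	}

	while (fgets(line, sizeof(line), fp)) {
		if (strstr(line, "DURATION_MS:") || strstr(line, "THROTTLE_MS:"))
			fputs(line, stdout);
	}

	fclose(fp);
	return 0;
}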

Note: the rate at which "THROTTLE_MS" grows is controlled by the
parameters "kernel.softirq_period_ms" and "kernel.softirq_runtime_ms".
Generally speaking, the smaller the softirq CPU bandwidth, the faster
"THROTTLE_MS" increases, especially when the pending softirq workload
is heavy.
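
As an illustration of that relationship (again, not part of this
patch), the softirq bandwidth can be shrunk via the two sysctls; the
/proc/sys paths below are assumed to follow the usual mapping of the
kernel.softirq_* parameters mentioned above:

/*
 * Illustration only, not part of this patch: allow softirqs at most
 * 10ms of every 100ms period. The /proc/sys paths are assumed from the
 * sysctl names kernel.softirq_period_ms and kernel.softirq_runtime_ms.
 */
#include <stdio.h>

static int write_val(const char *path, const char *val)
{
	FILE *fp = fopen(path, "w");

	if (!fp) {
		perror(path);
		return -1;
	}
	fputs(val, fp);
	return fclose(fp);
}

int main(void)
{
	write_val("/proc/sys/kernel/softirq_period_ms", "100");
	write_val("/proc/sys/kernel/softirq_runtime_ms", "10");
	return 0;
}

With such a small runtime/period ratio, re-reading /proc/softirqs under
heavy softirq load (e.g. with the reader above) should show
"THROTTLE_MS" growing much faster than with the default settings.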

Signed-off-by: Liao Chang <liaochang1@...wei.com>
---
 fs/proc/softirqs.c          | 18 ++++++++++++++++++
 include/linux/kernel_stat.h | 27 +++++++++++++++++++++++++++
 kernel/softirq.c            | 15 +++++++++++++--
 3 files changed, 58 insertions(+), 2 deletions(-)

diff --git a/fs/proc/softirqs.c b/fs/proc/softirqs.c
index 12901dcf57e2..5ea3ede9833e 100644
--- a/fs/proc/softirqs.c
+++ b/fs/proc/softirqs.c
@@ -22,6 +22,24 @@ static int show_softirqs(struct seq_file *p, void *v)
 			seq_printf(p, " %10u", kstat_softirqs_cpu(i, j));
 		seq_putc(p, '\n');
 	}
+
+#ifdef CONFIG_SOFTIRQ_THROTTLE
+	seq_puts(p, "                    ");
+	for_each_possible_cpu(i)
+		seq_printf(p, "CPU%-8d", i);
+	seq_putc(p, '\n');
+
+	seq_printf(p, "%12s:", "DURATION_MS");
+	for_each_possible_cpu(j)
+		seq_printf(p, " %10lu", kstat_softirq_duration(j));
+	seq_putc(p, '\n');
+
+	seq_printf(p, "%12s:", "THROTTLE_MS");
+	for_each_possible_cpu(j)
+		seq_printf(p, " %10lu", kstat_softirq_throttle(j));
+	seq_putc(p, '\n');
+#endif
+
 	return 0;
 }
 
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 69ae6b278464..bbb52c55aad4 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -38,6 +38,11 @@ struct kernel_cpustat {
 struct kernel_stat {
 	unsigned long irqs_sum;
 	unsigned int softirqs[NR_SOFTIRQS];
+
+#ifdef CONFIG_SOFTIRQ_THROTTLE
+	unsigned long softirq_duration;
+	unsigned long softirq_throttle;
+#endif
 };
 
 DECLARE_PER_CPU(struct kernel_stat, kstat);
@@ -64,6 +69,28 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
        return kstat_cpu(cpu).softirqs[irq];
 }
 
+#ifdef CONFIG_SOFTIRQ_THROTTLE
+static inline unsigned long kstat_softirq_duration(int cpu)
+{
+	return jiffies_to_msecs(kstat_cpu(cpu).softirq_duration);
+}
+
+static inline unsigned long kstat_softirq_throttle(int cpu)
+{
+	return jiffies_to_msecs(kstat_cpu(cpu).softirq_throttle);
+}
+
+static inline unsigned long kstat_incr_softirq_duration(unsigned long delta)
+{
+	return kstat_this_cpu->softirq_duration += delta;
+}
+
+static inline unsigned long kstat_incr_softirq_throttle(unsigned long delta)
+{
+	return kstat_this_cpu->softirq_throttle += delta;
+}
+#endif
+
 /*
  * Number of interrupts per specific IRQ source, since bootup
  */
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 6de6db794ac5..7fc0dc39f788 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -79,6 +79,7 @@ struct softirq_runtime {
 	bool	throttled;
 	unsigned long	duration;
 	unsigned long	expires;
+	unsigned long	throttled_ts;
 	raw_spinlock_t lock;
 };
 static DEFINE_PER_CPU(struct softirq_runtime, softirq_runtime);
@@ -94,12 +95,16 @@ static void forward_softirq_expires(struct softirq_runtime *si_runtime)
 static void update_softirq_runtime(unsigned long duration)
 {
 	struct softirq_runtime *si_runtime = this_cpu_ptr(&softirq_runtime);
+	unsigned long now = jiffies;
+
+	kstat_incr_softirq_duration(duration);
 
 	raw_spin_lock(&si_runtime->lock);
 	si_runtime->duration += jiffies_to_msecs(duration);
 	if ((si_runtime->duration >= si_throttle.runtime) &&
-		time_before(jiffies, si_runtime->expires)) {
+		time_before(now, si_runtime->expires)) {
 		si_runtime->throttled = true;
+		si_runtime->throttled_ts = now;
 	}
 	raw_spin_unlock(&si_runtime->lock);
 }
@@ -107,13 +112,17 @@ static void update_softirq_runtime(unsigned long duration)
 static bool softirq_runtime_exceeded(void)
 {
 	struct softirq_runtime *si_runtime = this_cpu_ptr(&softirq_runtime);
+	unsigned long now = jiffies;
 
 	if ((unsigned int)si_throttle.runtime >= si_throttle.period)
 		return false;
 
 	raw_spin_lock(&si_runtime->lock);
-	if (!time_before(jiffies, si_runtime->expires))
+	if (!time_before(now, si_runtime->expires)) {
+		if (si_runtime->throttled)
+			kstat_incr_softirq_throttle(now - si_runtime->throttled_ts);
 		forward_softirq_expires(si_runtime);
+	}
 	raw_spin_unlock(&si_runtime->lock);
 	return si_runtime->throttled;
 }
@@ -140,6 +149,8 @@ static void softirq_throttle_update(void)
 	for_each_possible_cpu(cpu) {
 		si_runtime = per_cpu_ptr(&softirq_runtime, cpu);
 		raw_spin_lock(&si_runtime->lock);
+		if (si_runtime->throttled)
+			kstat_incr_softirq_throttle(jiffies - si_runtime->throttled_ts);
 		forward_softirq_expires(si_runtime);
 		raw_spin_unlock(&si_runtime->lock);
 	}
-- 
2.17.1
