Message-Id: <20240123121223.22318-4-yaoma@linux.alibaba.com>
Date: Tue, 23 Jan 2024 20:12:23 +0800
From: Bitao Hu <yaoma@...ux.alibaba.com>
To: dianders@...omium.org,
	akpm@...ux-foundation.org,
	pmladek@...e.com,
	tglx@...utronix.de,
	maz@...nel.org,
	liusong@...ux.alibaba.com
Cc: linux-kernel@...r.kernel.org,
	Bitao Hu <yaoma@...ux.alibaba.com>
Subject: [PATCH 3/3] watchdog/softlockup: add parameter to control the reporting of time-consuming hardirq

To identify the cause of a softlockup more accurately, we use
tracepoints to measure the time spent in each hardirq, which may have
some performance impact. Add a parameter so that users can enable
this feature on demand.
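
With this patch, hardirq time tracing stays off unless it is requested,
either at boot or at runtime, e.g.:

	softlockup_irqtrace=1                          (kernel command line)
	echo 1 > /proc/sys/kernel/softlockup_irqtrace  (runtime; 0 disables)
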
Signed-off-by: Bitao Hu <yaoma@...ux.alibaba.com>
---
 kernel/watchdog.c | 51 ++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 48 insertions(+), 3 deletions(-)

diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index f347c5d8c5c1..314dfd301d8c 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -303,6 +303,9 @@ unsigned int __read_mostly softlockup_panic =
 static bool softlockup_initialized __read_mostly;
 static u64 __read_mostly sample_period;
 
+static int __read_mostly softlockup_irqtrace;
+static bool softlockup_irqtrace_initialized __read_mostly;
+
 /* Timestamp taken after the last successful reschedule. */
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
 /* Timestamp of the last softlockup report. */
@@ -318,6 +321,13 @@ static int __init softlockup_panic_setup(char *str)
 }
 __setup("softlockup_panic=", softlockup_panic_setup);
 
+static int __init softlockup_irqtrace_setup(char *str)
+{
+	get_option(&str, &softlockup_irqtrace);
+	return 1;
+}
+__setup("softlockup_irqtrace=", softlockup_irqtrace_setup);
+
 static int __init nowatchdog_setup(char *str)
 {
 	watchdog_user_enabled = 0;
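
A side note on the two entry points (illustration only): get_option()
does no range checking and the flag is only ever tested for truth, so
any nonzero boot value behaves like 1, while the sysctl path added
below is clamped to 0..1 by extra1/extra2:

	softlockup_irqtrace=42                          -> stored as 42, enabled
	echo 42 > /proc/sys/kernel/softlockup_irqtrace  -> rejected with EINVAL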
@@ -635,7 +645,7 @@ static void print_hardirq_time(void)
 	u64 start_time, now, a;
 	u32 period_us, i, b;
 
-	if (test_bit(SOFTLOCKUP_HARDIRQ, this_cpu_ptr(&softlockup_flags))) {
+	if (softlockup_irqtrace && test_bit(SOFTLOCKUP_HARDIRQ, this_cpu_ptr(&softlockup_flags))) {
 		start_time = __this_cpu_read(hardirq_start_time);
 		now = local_clock();
 		period_us = (now - start_time)/1000;
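
For reference, local_clock() returns nanoseconds, so the reported
window is converted to microseconds before printing (a sketch of the
arithmetic only):

	period_us = (now - start_time) / 1000;	/* ns -> us */

With softlockup_irqtrace off, the new check short-circuits before
test_bit() and the hardirq report is skipped entirely.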
@@ -856,7 +866,10 @@ static void softlockup_stop_all(void)
 	if (!softlockup_initialized)
 		return;
 
-	unhook_hardirq_events();
+	if (softlockup_irqtrace_initialized) {
+		unhook_hardirq_events();
+		softlockup_irqtrace_initialized = false;
+	}
 
 	for_each_cpu(cpu, &watchdog_allowed_mask)
 		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);
@@ -874,7 +887,10 @@ static void softlockup_start_all(void)
 {
 	int cpu;
 
-	hook_hardirq_events();
+	if (softlockup_irqtrace && !softlockup_irqtrace_initialized) {
+		hook_hardirq_events();
+		softlockup_irqtrace_initialized = true;
+	}
 
 	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
 	for_each_cpu(cpu, &watchdog_allowed_mask)
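
The softlockup_irqtrace_initialized flag keeps hook/unhook balanced
across reconfigurations. Assuming a sysctl flip goes through
softlockup_stop_all() and then softlockup_start_all(), as in mainline,
the lifecycle looks like this (illustration only):

	write 1: stop_all  -> not initialized, nothing to unhook
	         start_all -> irqtrace && !initialized: hook, mark initialized
	write 0: stop_all  -> initialized: unhook, clear the flag
	         start_all -> !irqtrace: tracepoints stay unhooked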
@@ -1090,6 +1106,26 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
 	return err;
 }
 
+/*
+ * /proc/sys/kernel/softlockup_irqtrace
+ */
+int proc_softlockup_irqtrace(struct ctl_table *table, int write,
+		void *buffer, size_t *lenp, loff_t *ppos)
+{
+	int err, old;
+
+	mutex_lock(&watchdog_mutex);
+
+	old = READ_ONCE(softlockup_irqtrace);
+	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+	if (!err && write && old != READ_ONCE(softlockup_irqtrace))
+		proc_watchdog_update();
+
+	mutex_unlock(&watchdog_mutex);
+	return err;
+}
+
 /*
  * The cpumask is the mask of possible cpus that the watchdog can run
  * on, not the mask of cpus it is actually running on. This allows the
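
proc_softlockup_irqtrace() follows the same pattern as
proc_watchdog_thresh() above: take watchdog_mutex, let
proc_dointvec_minmax() apply the bounded write, and trigger
proc_watchdog_update() only when the value actually changed, so
rewriting the current value does not restart the watchdogs:

	echo 1 > /proc/sys/kernel/softlockup_irqtrace	(restart, hooks irq events)
	echo 1 > /proc/sys/kernel/softlockup_irqtrace	(old == new, no-op)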
@@ -1158,6 +1194,15 @@ static struct ctl_table watchdog_sysctls[] = {
 		.extra1		= SYSCTL_ZERO,
 		.extra2		= SYSCTL_ONE,
 	},
+	{
+		.procname	= "softlockup_irqtrace",
+		.data		= &softlockup_irqtrace,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_softlockup_irqtrace,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
+	},
 #ifdef CONFIG_SMP
 	{
 		.procname	= "softlockup_all_cpu_backtrace",
--
2.37.1 (Apple Git-137.1)