[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20110518033647.GB11023@google.com>
Date: Tue, 17 May 2011 20:36:47 -0700
From: Mandeep Singh Baines <msb@...omium.org>
To: Ingo Molnar <mingo@...e.hu>
Cc: Mandeep Singh Baines <msb@...omium.org>,
linux-kernel@...r.kernel.org,
Marcin Slusarz <marcin.slusarz@...il.com>,
Don Zickus <dzickus@...hat.com>,
Peter Zijlstra <a.p.zijlstra@...llo.nl>,
Frederic Weisbecker <fweisbec@...il.com>
Subject: [PATCH 3/4 v2] watchdog: disable watchdog when thresh is zero
Ingo Molnar (mingo@...e.hu) wrote:
> This now adds two similar looking blocks of these 4 lines, one in
> proc_dowatchdog_enabled(), one in proc_dowatchdog_thresh().
>
> They are not the same though. So what happens if the watchdog is disabled but
> the threshold is updated to nonzero - do we enable the watchdog?
>
Good point. Fixed in this patch (v2). Also modified the patch to
disable the watchdog if thresh == 0 or enabled == 0.
I used a #define for proc_dowatchdog instead of an inline to avoid
duplicating the large parameter list.
---
This restores the previous behavior of softlockup_thresh.
Currently, setting watchdog_thresh to zero causes the watchdog
kthreads to consume a lot of CPU.
In addition, the logic of proc_dowatchdog_thresh and proc_dowatchdog_enabled
has been factored into __proc_dowatchdog.
Signed-off-by: Mandeep Singh Baines <msb@...omium.org>
LKML-Reference: <20110517071018.GE22305@...e.hu>
Cc: Marcin Slusarz <marcin.slusarz@...il.com>
Cc: Don Zickus <dzickus@...hat.com>
Cc: Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc: Frederic Weisbecker <fweisbec@...il.com>
Cc: Ingo Molnar <mingo@...e.hu>
---
include/linux/nmi.h | 7 +++++--
include/linux/sched.h | 1 -
kernel/sysctl.c | 4 +++-
kernel/watchdog.c | 25 +++++++++----------------
4 files changed, 17 insertions(+), 20 deletions(-)
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index c536f85..10cbca7 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -47,9 +47,12 @@ static inline bool trigger_all_cpu_backtrace(void)
int hw_nmi_is_cpu_stuck(struct pt_regs *);
u64 hw_nmi_get_sample_period(void);
extern int watchdog_enabled;
+extern int watchdog_thresh;
struct ctl_table;
-extern int proc_dowatchdog_enabled(struct ctl_table *, int ,
- void __user *, size_t *, loff_t *);
+extern int __proc_dowatchdog(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+#define proc_dowatchdog_enabled __proc_dowatchdog
+#define proc_dowatchdog_thresh __proc_dowatchdog
#endif
#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 12211e1..d8b2d0b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -315,7 +315,6 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
-extern int softlockup_thresh;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c0bb324..acb12f4 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -731,10 +731,12 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof (int),
.mode = 0644,
.proc_handler = proc_dowatchdog_enabled,
+ .extra1 = &zero,
+ .extra2 = &one,
},
{
.procname = "watchdog_thresh",
- .data = &softlockup_thresh,
+ .data = &watchdog_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dowatchdog_thresh,
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index cf0e09f..ea3dfc2 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -28,7 +28,7 @@
#include <linux/perf_event.h>
int watchdog_enabled = 1;
-int __read_mostly softlockup_thresh = 60;
+int __read_mostly watchdog_thresh = 60;
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
@@ -105,12 +105,12 @@ static unsigned long get_timestamp(int this_cpu)
static unsigned long get_sample_period(void)
{
/*
- * convert softlockup_thresh from seconds to ns
+ * convert watchdog_thresh from seconds to ns
* the divide by 5 is to give hrtimer 5 chances to
* increment before the hardlockup detector generates
* a warning
*/
- return softlockup_thresh * (NSEC_PER_SEC / 5);
+ return watchdog_thresh * (NSEC_PER_SEC / 5);
}
/* Commands for resetting the watchdog */
@@ -182,7 +182,7 @@ static int is_softlockup(unsigned long touch_ts)
unsigned long now = get_timestamp(smp_processor_id());
/* Warn about unreasonable delays: */
- if (time_after(now, touch_ts + softlockup_thresh))
+ if (time_after(now, touch_ts + watchdog_thresh))
return now - touch_ts;
return 0;
@@ -501,19 +501,19 @@ static void watchdog_disable_all_cpus(void)
/* sysctl functions */
#ifdef CONFIG_SYSCTL
/*
- * proc handler for /proc/sys/kernel/nmi_watchdog
+ * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
*/
-int proc_dowatchdog_enabled(struct ctl_table *table, int write,
- void __user *buffer, size_t *length, loff_t *ppos)
+int __proc_dowatchdog(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
- ret = proc_dointvec(table, write, buffer, length, ppos);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret || !write)
goto out;
- if (watchdog_enabled)
+ if (watchdog_enabled && watchdog_thresh)
watchdog_enable_all_cpus();
else
watchdog_disable_all_cpus();
@@ -521,13 +521,6 @@ int proc_dowatchdog_enabled(struct ctl_table *table, int write,
out:
return ret;
}
-
-int proc_dowatchdog_thresh(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
-{
- return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-}
#endif /* CONFIG_SYSCTL */
--
1.7.3.1
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists