Message-ID: <1451936091-29247-7-git-send-email-cmetcalf@ezchip.com>
Date: Mon, 4 Jan 2016 14:34:44 -0500
From: Chris Metcalf <cmetcalf@...hip.com>
To: Gilad Ben Yossef <giladb@...hip.com>,
Steven Rostedt <rostedt@...dmis.org>,
Ingo Molnar <mingo@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Andrew Morton <akpm@...ux-foundation.org>,
"Rik van Riel" <riel@...hat.com>, Tejun Heo <tj@...nel.org>,
Frederic Weisbecker <fweisbec@...il.com>,
Thomas Gleixner <tglx@...utronix.de>,
"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
Christoph Lameter <cl@...ux.com>,
Viresh Kumar <viresh.kumar@...aro.org>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will.deacon@....com>,
Andy Lutomirski <luto@...capital.net>,
<linux-doc@...r.kernel.org>, <linux-kernel@...r.kernel.org>
CC: Chris Metcalf <cmetcalf@...hip.com>
Subject: [PATCH v9 06/13] task_isolation: add debug boot flag
The new "task_isolation_debug" flag simplifies debugging
of TASK_ISOLATION kernels when processes are running in
PR_TASK_ISOLATION_ENABLE mode. Such processes should get no
interrupts from the kernel, and if they do, when this boot flag is
specified a kernel stack dump on the console is generated.
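For example (a sketch only: the prctl() constants come from the
earlier patches in this series, and the numeric fallback values and
cpu list here are illustrative), booting with "task_isolation=1-3
task_isolation_debug" will dump a backtrace whenever the kernel is
about to interrupt a process that has done something like:

	#include <stdio.h>
	#include <sys/prctl.h>

	/* From the prctl.h additions earlier in this series;
	 * the numeric fallbacks are illustrative only. */
	#ifndef PR_SET_TASK_ISOLATION
	# define PR_SET_TASK_ISOLATION		48
	# define PR_TASK_ISOLATION_ENABLE	1
	#endif

	int main(void)
	{
		/* Assumes the task was already affinitized to a
		 * task_isolation core, e.g. via sched_setaffinity(). */
		if (prctl(PR_SET_TASK_ISOLATION,
			  PR_TASK_ISOLATION_ENABLE, 0, 0, 0) != 0) {
			perror("prctl(PR_SET_TASK_ISOLATION)");
			return 1;
		}
		for (;;)
			;	/* userspace-only busy loop */
	}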
It's possible to use ftrace simply to detect that a task_isolation
core has unexpectedly entered the kernel. What this boot flag adds
is better diagnostics from the kernel itself, e.g. the IPI-generating
code can report which remote core and context is preparing to
deliver an interrupt to a task_isolation core.
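With the flag enabled, the remote core reports something like the
following (the cpu number and task name/pid here are made up; the
format is the pr_err() in task_isolation_debug_task() below),
followed by a dump_stack() backtrace of the context that is about
to send the interrupt:

	Interrupt detected for task_isolation cpu 3, myapp/1234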
It may be worth considering other ways to generate this debugging
output, but console spew is simple and direct for now.
Signed-off-by: Chris Metcalf <cmetcalf@...hip.com>
---
Documentation/kernel-parameters.txt | 8 +++++
include/linux/isolation.h | 5 ++++
kernel/irq_work.c | 5 +++-
kernel/isolation.c | 60 +++++++++++++++++++++++++++++++++++++
kernel/sched/core.c | 18 +++++++++++
kernel/signal.c | 5 ++++
kernel/smp.c | 6 +++-
kernel/softirq.c | 33 ++++++++++++++++++++
8 files changed, 138 insertions(+), 2 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e035679e646e..112fba1727f4 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3673,6 +3673,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
also sets up nohz_full and isolcpus mode for the
listed set of cpus.
+ task_isolation_debug [KNL]
+ In kernels built with CONFIG_TASK_ISOLATION
+ and booted in task_isolation= mode, this
+ setting will generate console backtraces when
+ the kernel is about to interrupt a task that
+ has requested PR_TASK_ISOLATION_ENABLE and is
+ running on a task_isolation core.
+
tcpmhash_entries= [KNL,NET]
Set the number of tcp_metrics_hash slots.
Default value is 8192 or 16384 depending on total
diff --git a/include/linux/isolation.h b/include/linux/isolation.h
index 69a3e4c59ab3..3e15e75d078f 100644
--- a/include/linux/isolation.h
+++ b/include/linux/isolation.h
@@ -43,6 +43,9 @@ static inline void task_isolation_enter(void)
extern bool task_isolation_syscall(int nr);
extern void task_isolation_exception(const char *fmt, ...);
extern void task_isolation_interrupt(struct task_struct *, const char *buf);
+extern void task_isolation_debug(int cpu);
+extern void task_isolation_debug_cpumask(const struct cpumask *);
+extern void task_isolation_debug_task(int cpu, struct task_struct *p);
static inline bool task_isolation_strict(void)
{
@@ -70,6 +73,8 @@ static inline bool task_isolation_ready(void) { return true; }
static inline void task_isolation_enter(void) { }
static inline bool task_isolation_check_syscall(int nr) { return false; }
static inline void task_isolation_check_exception(const char *fmt, ...) { }
+static inline void task_isolation_debug(int cpu) { }
+#define task_isolation_debug_cpumask(mask) do {} while (0)
#endif
#endif
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index bcf107ce0854..a9b95ce00667 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
+#include <linux/isolation.h>
#include <asm/processor.h>
@@ -75,8 +76,10 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
if (!irq_work_claim(work))
return false;
- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+ if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) {
+ task_isolation_debug(cpu);
arch_send_call_function_single_ipi(cpu);
+ }
return true;
}
diff --git a/kernel/isolation.c b/kernel/isolation.c
index 29ffb21ada0b..9f31c0b458ed 100644
--- a/kernel/isolation.c
+++ b/kernel/isolation.c
@@ -11,6 +11,7 @@
#include <linux/vmstat.h>
#include <linux/isolation.h>
#include <linux/syscalls.h>
+#include <linux/ratelimit.h>
#include <asm/unistd.h>
#include "time/tick-sched.h"
@@ -163,3 +164,62 @@ bool task_isolation_syscall(int syscall)
task_isolation_exception("syscall %d", syscall);
return true;
}
+
+/* Enable debugging of any interrupts of task_isolation cores. */
+static int task_isolation_debug_flag;
+static int __init task_isolation_debug_func(char *str)
+{
+ task_isolation_debug_flag = true;
+ return 1;
+}
+__setup("task_isolation_debug", task_isolation_debug_func);
+
+void task_isolation_debug_task(int cpu, struct task_struct *p)
+{
+ static DEFINE_RATELIMIT_STATE(console_output, HZ, 1);
+ bool force_debug = false;
+
+ /*
+ * Our caller made sure the task was running on a task isolation
+ * core, but make sure the task has enabled isolation.
+ */
+ if (!(p->task_isolation_flags & PR_TASK_ISOLATION_ENABLE))
+ return;
+
+ /*
+ * If the task was in strict mode, deliver a signal to it.
+ * We disable task isolation mode when we deliver a signal
+ * so we won't end up recursing back here again.
+ * If we are in an NMI, we don't try delivering the signal
+ * and instead just treat it as if "debug" mode was enabled,
+ * since that's pretty much all we can do.
+ */
+ if (p->task_isolation_flags & PR_TASK_ISOLATION_STRICT) {
+ if (in_nmi())
+ force_debug = true;
+ else
+ task_isolation_interrupt(p, "interrupt");
+ }
+
+ /*
+ * If (for example) the timer interrupt starts ticking
+ * unexpectedly, we will get an unmanageable flow of output,
+ * so limit to one backtrace per second.
+ */
+ if (force_debug ||
+ (task_isolation_debug_flag && __ratelimit(&console_output))) {
+ pr_err("Interrupt detected for task_isolation cpu %d, %s/%d\n",
+ cpu, p->comm, p->pid);
+ dump_stack();
+ }
+}
+
+void task_isolation_debug_cpumask(const struct cpumask *mask)
+{
+ int cpu, thiscpu = smp_processor_id();
+
+ /* No need to report on this cpu since we're already in the kernel. */
+ for_each_cpu(cpu, mask)
+ if (cpu != thiscpu)
+ task_isolation_debug(cpu);
+}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 732e993b564b..700120221f6b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -74,6 +74,7 @@
#include <linux/binfmts.h>
#include <linux/context_tracking.h>
#include <linux/compiler.h>
+#include <linux/isolation.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
@@ -746,6 +747,23 @@ bool sched_can_stop_tick(void)
}
#endif /* CONFIG_NO_HZ_FULL */
+#ifdef CONFIG_TASK_ISOLATION
+void task_isolation_debug(int cpu)
+{
+ struct task_struct *p;
+
+ if (!task_isolation_possible(cpu))
+ return;
+
+ rcu_read_lock();
+ p = cpu_curr(cpu);
+ get_task_struct(p);
+ rcu_read_unlock();
+ task_isolation_debug_task(cpu, p);
+ put_task_struct(p);
+}
+#endif
+
void sched_avg_update(struct rq *rq)
{
s64 period = sched_avg_period();
diff --git a/kernel/signal.c b/kernel/signal.c
index f3f1f7a972fd..c45ef71f329c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -638,6 +638,11 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
*/
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
+#ifdef CONFIG_TASK_ISOLATION
+ /* If the task is being killed, don't complain about task_isolation. */
+ if (state & TASK_WAKEKILL)
+ t->task_isolation_flags = 0;
+#endif
set_tsk_thread_flag(t, TIF_SIGPENDING);
/*
* TASK_WAKEKILL also means wake it up in the stopped/traced/killable
diff --git a/kernel/smp.c b/kernel/smp.c
index d903c02223af..a61894409645 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -14,6 +14,7 @@
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
+#include <linux/isolation.h>
#include "smpboot.h"
@@ -178,8 +179,10 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
* locking and barrier primitives. Generic code isn't really
* equipped to do the right thing...
*/
- if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
+ if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) {
+ task_isolation_debug(cpu);
arch_send_call_function_single_ipi(cpu);
+ }
return 0;
}
@@ -457,6 +460,7 @@ void smp_call_function_many(const struct cpumask *mask,
}
/* Send a message to all CPUs in the map */
+ task_isolation_debug_cpumask(cfd->cpumask);
arch_send_call_function_ipi_mask(cfd->cpumask);
if (wait) {
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 479e4436f787..f249b71cddf4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -26,6 +26,7 @@
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
+#include <linux/isolation.h>
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
@@ -319,6 +320,37 @@ asmlinkage __visible void do_softirq(void)
local_irq_restore(flags);
}
+/* Determine whether this IRQ is something task isolation cares about. */
+static void task_isolation_irq(void)
+{
+#ifdef CONFIG_TASK_ISOLATION
+ struct pt_regs *regs;
+
+ if (!context_tracking_cpu_is_enabled())
+ return;
+
+ /*
+ * We have not yet called __irq_enter() and so we haven't
+ * adjusted the hardirq count. This test will allow us to
+ * avoid false positives for nested IRQs.
+ */
+ if (in_interrupt())
+ return;
+
+ /*
+ * If we were already in the kernel, not from an irq but from
+ * a syscall or synchronous exception/fault, this test should
+ * avoid a false positive as well. Note that this requires
+ * architecture support for calling set_irq_regs() prior to
+ * calling irq_enter(), and if it's not done consistently, we
+ * will not consistently avoid false positives here.
+ */
+ regs = get_irq_regs();
+ if (regs && user_mode(regs))
+ task_isolation_debug(smp_processor_id());
+#endif
+}
+
/*
* Enter an interrupt context.
*/
@@ -335,6 +367,7 @@ void irq_enter(void)
_local_bh_enable();
}
+ task_isolation_irq();
__irq_enter();
}
--
2.1.2