Message-Id: <20221016162305.2489629-14-joel@joelfernandes.org>
Date: Sun, 16 Oct 2022 16:23:05 +0000
From: "Joel Fernandes (Google)" <joel@...lfernandes.org>
To: rcu@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, frederic@...nel.org,
paulmck@...nel.org,
"Joel Fernandes (Google)" <joel@...lfernandes.org>
Subject: [PATCH v9 13/13] rcu/debug: Add wake-up debugging for lazy callbacks
This patch adds initial debugging for lazy callbacks: whether a given
callback does a wake-up or not. We see that callbacks doing wake-ups are
usually associated with synchronous use cases (SCSI, rcu_sync,
synchronize_rcu(), etc.).
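To make the pattern concrete, below is a purely hypothetical sketch (not
part of this patch; the names are made up) of a callback that performs a
wake-up, similar in spirit to what synchronize_rcu() does internally:

  struct my_waiter {
          struct rcu_head head;
          struct completion done;
  };

  /* Runs from RCU callback context, possibly long after call_rcu(). */
  static void my_wakeup_cb(struct rcu_head *head)
  {
          struct my_waiter *w = container_of(head, struct my_waiter, head);

          /* This is the wake-up that the new probe would flag. */
          complete(&w->done);
  }

  static void my_synchronize(void)
  {
          struct my_waiter w;

          init_completion(&w.done);
          call_rcu(&w.head, my_wakeup_cb);
          /* Blocks until the callback runs; lazily delaying the callback
           * also delays this waiter. */
          wait_for_completion(&w.done);
  }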
The code is not very intrusive: almost all of the logic lives in
'lazy-debug.h', with just a few calls from tree.c.
In the future, we will add more functionality, such as ensuring that
callbacks execute in bounded time.
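For reference, when the probe fires, the report is assembled from the
pr_err() strings added in lazy-debug.h and looks roughly like the
following (the callback and task shown here are illustrative only):

  *****************************************************
  RCU: A wake up has been detected from a lazy callback!
  The callback name is: my_wakeup_cb
  The task it woke up is: my_task (1234)
  This could cause performance issues! Check the stack.
  *****************************************************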
Signed-off-by: Joel Fernandes (Google) <joel@...lfernandes.org>
---
kernel/rcu/Kconfig | 7 ++
kernel/rcu/lazy-debug.h | 154 ++++++++++++++++++++++++++++++++++++++++
kernel/rcu/tree.c | 9 +++
3 files changed, 170 insertions(+)
create mode 100644 kernel/rcu/lazy-debug.h
diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
index edd632e68497..08c06f739187 100644
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
@@ -322,4 +322,11 @@ config RCU_LAZY
To save power, batch RCU callbacks and flush after delay, memory
pressure or callback list growing too big.
+config RCU_LAZY_DEBUG
+ bool "RCU callback lazy invocation debugging"
+ depends on RCU_LAZY
+ default n
+ help
+ Debugging to catch issues caused by delayed RCU callbacks.
+
endmenu # "RCU Subsystem"
diff --git a/kernel/rcu/lazy-debug.h b/kernel/rcu/lazy-debug.h
new file mode 100644
index 000000000000..b8399b51d06a
--- /dev/null
+++ b/kernel/rcu/lazy-debug.h
@@ -0,0 +1,154 @@
+#include <linux/string.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_RCU_LAZY_DEBUG
+#include <linux/preempt.h>
+#include <trace/events/sched.h>
+
+static DEFINE_PER_CPU(bool, rcu_lazy_cb_exec) = false;
+static DEFINE_PER_CPU(void *, rcu_lazy_ip) = NULL;
+
+static DEFINE_RAW_SPINLOCK(lazy_funcs_lock);
+
+#define FUNC_SIZE 1024
+static unsigned long lazy_funcs[FUNC_SIZE];
+static int nr_funcs;
+
+static void __find_func(unsigned long ip, int *B, int *E, int *N)
+{
+ unsigned long *p;
+ int b, e, n;
+
+ b = n = 0;
+ e = nr_funcs - 1;
+
+ while (b <= e) {
+ n = (b + e) / 2;
+ p = &lazy_funcs[n];
+ if (ip > *p) {
+ b = n + 1;
+ } else if (ip < *p) {
+ e = n - 1;
+ } else
+ break;
+ }
+
+ *B = b;
+ *E = e;
+ *N = n;
+
+ return;
+}
+
+static bool lazy_func_exists(void* ip_ptr)
+{
+ int b, e, n;
+ unsigned long flags;
+ unsigned long ip = (unsigned long)ip_ptr;
+
+ raw_spin_lock_irqsave(&lazy_funcs_lock, flags);
+ __find_func(ip, &b, &e, &n);
+ raw_spin_unlock_irqrestore(&lazy_funcs_lock, flags);
+
+ return b <= e;
+}
+
+static int lazy_func_add(void* ip_ptr)
+{
+ int b, e, n;
+ unsigned long flags;
+ unsigned long ip = (unsigned long)ip_ptr;
+
+ raw_spin_lock_irqsave(&lazy_funcs_lock, flags);
+ if (nr_funcs >= FUNC_SIZE) {
+ raw_spin_unlock_irqrestore(&lazy_funcs_lock, flags);
+ return -1;
+ }
+
+ __find_func(ip, &b, &e, &n);
+
+ if (b > e) {
+ if (b != nr_funcs)
+ memmove(&lazy_funcs[b+1], &lazy_funcs[b],
+ (sizeof(*lazy_funcs) * (nr_funcs - b)));
+
+ lazy_funcs[b] = ip;
+ nr_funcs++;
+ }
+
+ raw_spin_unlock_irqrestore(&lazy_funcs_lock, flags);
+ return 0;
+}
+
+static void rcu_set_lazy_context(void *ip_ptr)
+{
+ bool *flag = this_cpu_ptr(&rcu_lazy_cb_exec);
+ *flag = lazy_func_exists(ip_ptr);
+
+ if (*flag) {
+ *this_cpu_ptr(&rcu_lazy_ip) = ip_ptr;
+ } else {
+ *this_cpu_ptr(&rcu_lazy_ip) = NULL;
+ }
+}
+
+static void rcu_reset_lazy_context(void)
+{
+ bool *flag = this_cpu_ptr(&rcu_lazy_cb_exec);
+ *flag = false;
+}
+
+static bool rcu_is_lazy_context(void)
+{
+ return *(this_cpu_ptr(&rcu_lazy_cb_exec));
+}
+
+static void
+probe_waking(void *ignore, struct task_struct *p)
+{
+ // kworker wake ups don't appear to cause performance issues.
+ // Ignore for now.
+ if (!strncmp(p->comm, "kworker", 7))
+ return;
+
+ if (WARN_ON(!in_nmi() && !in_hardirq() && rcu_is_lazy_context())) {
+ pr_err("*****************************************************\n");
+ pr_err("RCU: A wake up has been detected from a lazy callback!\n");
+ pr_err("The callback name is: %ps\n", *this_cpu_ptr(&rcu_lazy_ip));
+ pr_err("The task it woke up is: %s (%d)\n", p->comm, p->pid);
+ pr_err("This could cause performance issues! Check the stack.\n");
+ pr_err("*****************************************************\n");
+ }
+}
+
+static void rcu_lazy_debug_init(void)
+{
+ int ret;
+ pr_info("RCU Lazy CB debugging is turned on, system may be slow.\n");
+
+ ret = register_trace_sched_waking(probe_waking, NULL);
+ if (ret)
+ pr_info("RCU: Lazy debug ched_waking probe could not be registered.");
+}
+
+#else
+
+static int lazy_func_add(void* ip_ptr)
+{
+ return -1;
+}
+
+
+static void rcu_set_lazy_context(void *ip_ptr)
+{
+}
+
+static void rcu_reset_lazy_context(void)
+{
+}
+
+static void rcu_lazy_debug_init(void)
+{
+}
+
+#endif
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f4b390f86865..2b2a8d84896d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -67,6 +67,7 @@
#include "tree.h"
#include "rcu.h"
+#include "lazy-debug.h"
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
@@ -2245,7 +2246,10 @@ static void rcu_do_batch(struct rcu_data *rdp)
f = rhp->func;
WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
+
+ rcu_set_lazy_context(f);
f(rhp);
+ rcu_reset_lazy_context();
rcu_lock_release(&rcu_callback_map);
@@ -2770,6 +2774,10 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy)
}
check_cb_ovld(rdp);
+
+ if (lazy)
+ lazy_func_add(func);
+
if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
return; // Enqueued onto ->nocb_bypass, so just leave.
// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
@@ -4805,6 +4813,7 @@ void __init rcu_init(void)
rcu_early_boot_tests();
kfree_rcu_batch_init();
+ rcu_lazy_debug_init();
rcu_bootup_announce();
sanitize_kthread_prio();
rcu_init_geometry();
--
2.38.0.413.g74048e4d9e-goog