Message-Id: <20240816061304.48843-2-neeraj.upadhyay@kernel.org>
Date: Fri, 16 Aug 2024 11:43:02 +0530
From: neeraj.upadhyay@...nel.org
To: rcu@...r.kernel.org
Cc: linux-kernel@...r.kernel.org,
kernel-team@...a.com,
rostedt@...dmis.org,
paulmck@...nel.org,
neeraj.upadhyay@...nel.org,
neeraj.upadhyay@....com,
boqun.feng@...il.com,
joel@...lfernandes.org,
urezki@...il.com,
frederic@...nel.org
Subject: [PATCH rcu 2/4] rcu: Extract synchronize_rcu_expedited_stall() from synchronize_rcu_expedited_wait()
From: "Paul E. McKenney" <paulmck@...nel.org>
This commit extracts the RCU CPU stall-warning report code from
synchronize_rcu_expedited_wait() and places it in a new function named
synchronize_rcu_expedited_stall(). This is strictly a code-movement
commit. A later commit will use this reorganization to avoid printing
expedited RCU CPU stall warnings while there are ongoing CSD-lock stall
reports.
Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
Signed-off-by: Neeraj Upadhyay <neeraj.upadhyay@...nel.org>
---
kernel/rcu/tree_exp.h | 111 +++++++++++++++++++++++-------------------
1 file changed, 60 insertions(+), 51 deletions(-)
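For reference, the point of this extraction is that the whole report ends up behind a single call site that a later patch can guard. A minimal sketch of that follow-up shape is shown below, assuming some predicate along the lines of csd_lock_is_stuck(); that name is illustrative here and is not something this patch introduces:

	/*
	 * Illustrative sketch only, not part of this patch: a later
	 * commit could skip the expedited RCU CPU stall report while
	 * CSD-lock stall reports are in progress by guarding the one
	 * call site of the extracted helper.
	 */
	if (!csd_lock_is_stuck())	/* assumed/illustrative predicate */
		synchronize_rcu_expedited_stall(jiffies_start, j);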
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 4acd29d16fdb..17dd5169012d 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -542,6 +542,65 @@ static bool synchronize_rcu_expedited_wait_once(long tlimit)
 	return false;
 }
 
+/*
+ * Print out an expedited RCU CPU stall warning message.
+ */
+static void synchronize_rcu_expedited_stall(unsigned long jiffies_start, unsigned long j)
+{
+	int cpu;
+	unsigned long mask;
+	int ndetected;
+	struct rcu_node *rnp;
+	struct rcu_node *rnp_root = rcu_get_root();
+
+	pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {", rcu_state.name);
+	ndetected = 0;
+	rcu_for_each_leaf_node(rnp) {
+		ndetected += rcu_print_task_exp_stall(rnp);
+		for_each_leaf_node_possible_cpu(rnp, cpu) {
+			struct rcu_data *rdp;
+
+			mask = leaf_node_cpu_bit(rnp, cpu);
+			if (!(READ_ONCE(rnp->expmask) & mask))
+				continue;
+			ndetected++;
+			rdp = per_cpu_ptr(&rcu_data, cpu);
+			pr_cont(" %d-%c%c%c%c", cpu,
+				"O."[!!cpu_online(cpu)],
+				"o."[!!(rdp->grpmask & rnp->expmaskinit)],
+				"N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
+				"D."[!!data_race(rdp->cpu_no_qs.b.exp)]);
+		}
+	}
+	pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
+		j - jiffies_start, rcu_state.expedited_sequence, data_race(rnp_root->expmask),
+		".T"[!!data_race(rnp_root->exp_tasks)]);
+	if (ndetected) {
+		pr_err("blocking rcu_node structures (internal RCU debug):");
+		rcu_for_each_node_breadth_first(rnp) {
+			if (rnp == rnp_root)
+				continue; /* printed unconditionally */
+			if (sync_rcu_exp_done_unlocked(rnp))
+				continue;
+			pr_cont(" l=%u:%d-%d:%#lx/%c",
+				rnp->level, rnp->grplo, rnp->grphi, data_race(rnp->expmask),
+				".T"[!!data_race(rnp->exp_tasks)]);
+		}
+		pr_cont("\n");
+	}
+	rcu_for_each_leaf_node(rnp) {
+		for_each_leaf_node_possible_cpu(rnp, cpu) {
+			mask = leaf_node_cpu_bit(rnp, cpu);
+			if (!(READ_ONCE(rnp->expmask) & mask))
+				continue;
+			preempt_disable(); // For smp_processor_id() in dump_cpu_task().
+			dump_cpu_task(cpu);
+			preempt_enable();
+		}
+		rcu_exp_print_detail_task_stall_rnp(rnp);
+	}
+}
+
 /*
  * Wait for the expedited grace period to elapse, issuing any needed
  * RCU CPU stall warnings along the way.
@@ -553,10 +612,8 @@ static void synchronize_rcu_expedited_wait(void)
 	unsigned long jiffies_stall;
 	unsigned long jiffies_start;
 	unsigned long mask;
-	int ndetected;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
-	struct rcu_node *rnp_root = rcu_get_root();
 	unsigned long flags;
 
 	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
@@ -593,55 +650,7 @@ static void synchronize_rcu_expedited_wait(void)
 	j = jiffies;
 	rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_EXP, (void *)(j - jiffies_start));
 	trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
-	pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
-	       rcu_state.name);
-	ndetected = 0;
-	rcu_for_each_leaf_node(rnp) {
-		ndetected += rcu_print_task_exp_stall(rnp);
-		for_each_leaf_node_possible_cpu(rnp, cpu) {
-			struct rcu_data *rdp;
-
-			mask = leaf_node_cpu_bit(rnp, cpu);
-			if (!(READ_ONCE(rnp->expmask) & mask))
-				continue;
-			ndetected++;
-			rdp = per_cpu_ptr(&rcu_data, cpu);
-			pr_cont(" %d-%c%c%c%c", cpu,
-				"O."[!!cpu_online(cpu)],
-				"o."[!!(rdp->grpmask & rnp->expmaskinit)],
-				"N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
-				"D."[!!data_race(rdp->cpu_no_qs.b.exp)]);
-		}
-	}
-	pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
-		j - jiffies_start, rcu_state.expedited_sequence,
-		data_race(rnp_root->expmask),
-		".T"[!!data_race(rnp_root->exp_tasks)]);
-	if (ndetected) {
-		pr_err("blocking rcu_node structures (internal RCU debug):");
-		rcu_for_each_node_breadth_first(rnp) {
-			if (rnp == rnp_root)
-				continue; /* printed unconditionally */
-			if (sync_rcu_exp_done_unlocked(rnp))
-				continue;
-			pr_cont(" l=%u:%d-%d:%#lx/%c",
-				rnp->level, rnp->grplo, rnp->grphi,
-				data_race(rnp->expmask),
-				".T"[!!data_race(rnp->exp_tasks)]);
-		}
-		pr_cont("\n");
-	}
-	rcu_for_each_leaf_node(rnp) {
-		for_each_leaf_node_possible_cpu(rnp, cpu) {
-			mask = leaf_node_cpu_bit(rnp, cpu);
-			if (!(READ_ONCE(rnp->expmask) & mask))
-				continue;
-			preempt_disable(); // For smp_processor_id() in dump_cpu_task().
-			dump_cpu_task(cpu);
-			preempt_enable();
-		}
-		rcu_exp_print_detail_task_stall_rnp(rnp);
-	}
+	synchronize_rcu_expedited_stall(jiffies_start, j);
 	jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
 	panic_on_rcu_stall();
 }
--
2.40.1