Message-ID: <158923076484.390.9211273599973610096.tip-bot2@tip-bot2>
Date: Mon, 11 May 2020 20:59:24 -0000
From: "tip-bot2 for Paul E. McKenney" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: "Paul E. McKenney" <paulmck@...nel.org>, x86 <x86@...nel.org>,
LKML <linux-kernel@...r.kernel.org>
Subject: [tip: core/rcu] rcu-tasks: Disable CPU hotplug across RCU tasks trace scans

The following commit has been merged into the core/rcu branch of tip:

Commit-ID: 81b4a7bc3b54b0b839dbf3d2b8c9a353ae910688
Gitweb: https://git.kernel.org/tip/81b4a7bc3b54b0b839dbf3d2b8c9a353ae910688
Author: Paul E. McKenney <paulmck@...nel.org>
AuthorDate: Sun, 22 Mar 2020 10:10:07 -07:00
Committer: Paul E. McKenney <paulmck@...nel.org>
CommitterDate: Mon, 27 Apr 2020 11:03:52 -07:00

rcu-tasks: Disable CPU hotplug across RCU tasks trace scans

This commit disables CPU hotplug across RCU tasks trace scans, which
is a first step towards correctly recognizing idle tasks "running" on
offline CPUs.

Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
---
kernel/rcu/tasks.h | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
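
The sketch below is a minimal userspace analogue of the bracketing this
patch introduces, for readers unfamiliar with the pattern: a
pthread_rwlock_t stands in for the kernel's CPU-hotplug lock, the scans
take it for reading (as cpus_read_lock()/cpus_read_unlock() do in the
hunks that follow), and a hotplug operation takes it for writing.  The
fake_* names and the file name are invented here for illustration and
are not kernel APIs.

/*
 * Minimal userspace analogue (illustrative only, not kernel code).
 * Build with:  cc -pthread analogue.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t fake_hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-ins for cpus_read_lock() and cpus_read_unlock(). */
static void fake_cpus_read_lock(void)
{
	pthread_rwlock_rdlock(&fake_hotplug_lock);
}

static void fake_cpus_read_unlock(void)
{
	pthread_rwlock_unlock(&fake_hotplug_lock);
}

/* A "hotplug" operation excludes all read-side holders. */
static void fake_cpu_hotplug_op(void)
{
	pthread_rwlock_wrlock(&fake_hotplug_lock);
	printf("hotplug: no scan can be in flight here\n");
	pthread_rwlock_unlock(&fake_hotplug_lock);
}

/* A scan bracketed by the read-side lock, as the tasklist and
 * holdout-list scans are after this patch. */
static void fake_scan(const char *what)
{
	fake_cpus_read_lock();
	printf("%s scan: the set of online CPUs cannot change here\n", what);
	fake_cpus_read_unlock();
}

int main(void)
{
	fake_scan("tasklist");
	fake_cpu_hotplug_op();	/* hotplug can run between the two scans */
	fake_scan("holdout list");
	return 0;
}

Note that the lock is taken and released around each scan separately
rather than held across the whole grace period, so CPU hotplug can make
progress in between, for example across the synchronize_rcu() call in
rcu_tasks_trace_postscan().
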
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index dd311e9..361e17d 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -910,16 +910,16 @@ static void rcu_tasks_trace_pregp_step(void)
 {
 	int cpu;
 
-	// Wait for CPU-hotplug paths to complete.
-	cpus_read_lock();
-	cpus_read_unlock();
-
 	// Allow for fast-acting IPIs.
 	atomic_set(&trc_n_readers_need_end, 1);
 
 	// There shouldn't be any old IPIs, but...
 	for_each_possible_cpu(cpu)
 		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
+
+	// Disable CPU hotplug across the tasklist scan.
+	// This also waits for all readers in CPU-hotplug code paths.
+	cpus_read_lock();
 }
 
 /* Do first-round processing for the specified task. */
@@ -935,6 +935,9 @@ static void rcu_tasks_trace_pertask(struct task_struct *t,
 /* Do intermediate processing between task and holdout scans. */
 static void rcu_tasks_trace_postscan(void)
 {
+	// Re-enable CPU hotplug now that the tasklist scan has completed.
+	cpus_read_unlock();
+
 	// Wait for late-stage exiting tasks to finish exiting.
 	// These might have passed the call to exit_tasks_rcu_finish().
 	synchronize_rcu();
@@ -979,6 +982,9 @@ static void check_all_holdout_tasks_trace(struct list_head *hop,
 {
 	struct task_struct *g, *t;
 
+	// Disable CPU hotplug across the holdout list scan.
+	cpus_read_lock();
+
 	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
 		// If safe and needed, try to check the current task.
 		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
@@ -991,6 +997,10 @@ static void check_all_holdout_tasks_trace(struct list_head *hop,
 		else if (needreport)
 			show_stalled_task_trace(t, firstreport);
 	}
+
+	// Re-enable CPU hotplug now that the holdout list scan has completed.
+	cpus_read_unlock();
+
 	if (needreport) {
 		if (firstreport)
 			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");