Message-ID: <161313343950.23325.15842852853541510969.tip-bot2@tip-bot2>
Date: Fri, 12 Feb 2021 12:37:19 -0000
From: "tip-bot2 for Frederic Weisbecker" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Josh Triplett <josh@...htriplett.org>,
Steven Rostedt <rostedt@...dmis.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Lai Jiangshan <jiangshanlai@...il.com>,
Joel Fernandes <joel@...lfernandes.org>,
Neeraj Upadhyay <neeraju@...eaurora.org>,
Thomas Gleixner <tglx@...utronix.de>,
Boqun Feng <boqun.feng@...il.com>,
Frederic Weisbecker <frederic@...nel.org>,
"Paul E. McKenney" <paulmck@...nel.org>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: core/rcu] rcu/nocb: De-offloading GP kthread

The following commit has been merged into the core/rcu branch of tip:

Commit-ID:     5bb39dc956f3d4f1bb75b5962b503426c45340ae
Gitweb:        https://git.kernel.org/tip/5bb39dc956f3d4f1bb75b5962b503426c45340ae
Author:        Frederic Weisbecker <frederic@...nel.org>
AuthorDate:    Fri, 13 Nov 2020 13:13:21 +01:00
Committer:     Paul E. McKenney <paulmck@...nel.org>
CommitterDate: Wed, 06 Jan 2021 16:24:20 -08:00

rcu/nocb: De-offloading GP kthread
To de-offload callback processing back onto a CPU, it is necessary
to clear SEGCBLIST_OFFLOADED and notify the nocb GP kthread, which will
then clear its own bit flag and ignore this CPU until further notice.
Whichever of the nocb CB and nocb GP kthreads is last to clear its own
bit notifies the de-offloading worker kthread. Once notified, this
worker kthread can proceed safe in the knowledge that the nocb CB and
GP kthreads will no longer be manipulating this CPU's RCU callback list.
This commit makes this change.

Cc: Josh Triplett <josh@...htriplett.org>
Cc: Steven Rostedt <rostedt@...dmis.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
Cc: Lai Jiangshan <jiangshanlai@...il.com>
Cc: Joel Fernandes <joel@...lfernandes.org>
Cc: Neeraj Upadhyay <neeraju@...eaurora.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Inspired-by: Paul E. McKenney <paulmck@...nel.org>
Tested-by: Boqun Feng <boqun.feng@...il.com>
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
---
 kernel/rcu/tree_plugin.h | 54 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 51 insertions(+), 3 deletions(-)

diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index b70cc91..fe46e70 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1928,6 +1928,33 @@ static void do_nocb_bypass_wakeup_timer(struct timer_list *t)
__call_rcu_nocb_wake(rdp, true, flags);
}
+static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp)
+{
+ u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP;
+
+ return rcu_segcblist_test_flags(&rdp->cblist, flags);
+}
+
+static inline bool nocb_gp_update_state(struct rcu_data *rdp, bool *needwake_state)
+{
+ struct rcu_segcblist *cblist = &rdp->cblist;
+
+ if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
+ return true;
+ } else {
+ /*
+ * De-offloading. Clear our flag and notify the de-offload worker.
+ * We will ignore this rdp until it ever gets re-offloaded.
+ */
+ WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+ rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
+ if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
+ *needwake_state = true;
+ return false;
+ }
+}
+
+
/*
* No-CBs GP kthreads come here to wait for additional callbacks to show up
* or for grace periods to end.
@@ -1956,8 +1983,17 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
*/
WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
+ bool needwake_state = false;
+ if (!nocb_gp_enabled_cb(rdp))
+ continue;
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
rcu_nocb_lock_irqsave(rdp, flags);
+ if (!nocb_gp_update_state(rdp, &needwake_state)) {
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+ if (needwake_state)
+ swake_up_one(&rdp->nocb_state_wq);
+ continue;
+ }
bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
if (bypass_ncbs &&
(time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
@@ -2221,7 +2257,8 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
{
struct rcu_segcblist *cblist = &rdp->cblist;
- bool wake_cb = false;
+ struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+ bool wake_cb = false, wake_gp = false;
unsigned long flags;
printk("De-offloading %d\n", rdp->cpu);
@@ -2247,9 +2284,19 @@ static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
if (wake_cb)
swake_up_one(&rdp->nocb_cb_wq);
- swait_event_exclusive(rdp->nocb_state_wq,
- !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
+ raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+ if (rdp_gp->nocb_gp_sleep) {
+ rdp_gp->nocb_gp_sleep = false;
+ wake_gp = true;
+ }
+ raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+ if (wake_gp)
+ wake_up_process(rdp_gp->nocb_gp_kthread);
+
+ swait_event_exclusive(rdp->nocb_state_wq,
+ !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
+ SEGCBLIST_KTHREAD_GP));
return 0;
}
@@ -2332,6 +2379,7 @@ void __init rcu_init_nohz(void)
rcu_segcblist_init(&rdp->cblist);
rcu_segcblist_offload(&rdp->cblist, true);
rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
+ rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
}
rcu_organize_nocb_kthreads();
}
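
As an illustration of the handshake the changelog describes (and not part of
the patch itself), here is a minimal stand-alone user-space sketch: pthreads
and a mutex/condvar stand in for the nocb kthreads and swait, and all names
and flag values below are invented for this example. The de-offload worker
clears the OFFLOADED bit and wakes the per-CPU threads; each thread then
clears its own KTHREAD_* bit, and whichever thread clears last wakes the
worker, mirroring nocb_gp_update_state() and the swait_event_exclusive()
call above.

/*
 * Stand-alone sketch of the "last one to clear its bit notifies the
 * de-offload worker" handshake. Illustration only; names are invented.
 */
#include <pthread.h>
#include <stdio.h>

#define FLAG_OFFLOADED   0x1	/* stands in for SEGCBLIST_OFFLOADED */
#define FLAG_KTHREAD_CB  0x2	/* stands in for SEGCBLIST_KTHREAD_CB */
#define FLAG_KTHREAD_GP  0x4	/* stands in for SEGCBLIST_KTHREAD_GP */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t state_wq = PTHREAD_COND_INITIALIZER; /* ~ nocb_state_wq */
static unsigned int flags = FLAG_OFFLOADED | FLAG_KTHREAD_CB | FLAG_KTHREAD_GP;

/* Each "kthread" clears its own bit; the last one to do so wakes the waiter. */
static void *nocb_thread(void *arg)
{
	unsigned int my_bit = *(unsigned int *)arg;

	pthread_mutex_lock(&lock);
	/* Wait until the de-offload worker has cleared FLAG_OFFLOADED. */
	while (flags & FLAG_OFFLOADED)
		pthread_cond_wait(&state_wq, &lock);
	/*
	 * De-offloading: clear our flag and, if the other thread's bit is
	 * already gone, notify the de-offload worker.
	 */
	flags &= ~my_bit;
	if (!(flags & (FLAG_KTHREAD_CB | FLAG_KTHREAD_GP)))
		pthread_cond_broadcast(&state_wq);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	unsigned int cb_bit = FLAG_KTHREAD_CB, gp_bit = FLAG_KTHREAD_GP;
	pthread_t cb, gp;

	pthread_create(&cb, NULL, nocb_thread, &cb_bit);
	pthread_create(&gp, NULL, nocb_thread, &gp_bit);

	/* De-offload worker: clear OFFLOADED and wake both threads... */
	pthread_mutex_lock(&lock);
	flags &= ~FLAG_OFFLOADED;
	pthread_cond_broadcast(&state_wq);
	/* ...then wait for both CB and GP bits to be cleared. */
	while (flags & (FLAG_KTHREAD_CB | FLAG_KTHREAD_GP))
		pthread_cond_wait(&state_wq, &lock);
	pthread_mutex_unlock(&lock);

	printf("de-offload complete, flags=%#x\n", flags);
	pthread_join(cb, NULL);
	pthread_join(gp, NULL);
	return 0;
}

The kernel performs the same dance under rcu_nocb_lock/nocb_gp_lock with
swait queues rather than a condvar, but the last-one-out-notifies structure
is the same.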