Message-Id: <20190801230810.21570-3-paulmck@linux.ibm.com>
Date: Thu, 1 Aug 2019 16:07:55 -0700
From: "Paul E. McKenney" <paulmck@...ux.ibm.com>
To: rcu@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, mingo@...nel.org,
jiangshanlai@...il.com, dipankar@...ibm.com,
akpm@...ux-foundation.org, mathieu.desnoyers@...icios.com,
josh@...htriplett.org, tglx@...utronix.de, peterz@...radead.org,
rostedt@...dmis.org, dhowells@...hat.com, edumazet@...gle.com,
fweisbec@...il.com, oleg@...hat.com, joel@...lfernandes.org,
"Paul E. McKenney" <paulmck@...ux.ibm.com>
Subject: [PATCH tip/core/rcu 03/18] rcu/nocb: Add checks for offloaded callback processing

This commit is a preparatory patch for offloaded callbacks using the
same ->cblist structure used by non-offloaded callbacks. It therefore
adds rcu_segcblist_is_offloaded() calls where they will be needed when
!rcu_segcblist_is_enabled() no longer flags the offloaded case. It also
adds checks in rcu_do_batch() to ensure that there are no missed checks:
Currently, it should not be possible for offloaded execution to reach
rcu_do_batch(), though this will change later in this series.
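
For context, rcu_segcblist_is_offloaded() is just a predicate over the
->offloaded flag that a companion patch elsewhere in this series adds to
struct rcu_segcblist.  A minimal sketch of that helper, assuming nothing
beyond that flag, looks like this:

	/* Is the specified rcu_segcblist offloaded to a no-CBs kthread? */
	static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
	{
		return rsclp->offloaded;
	}

The WARN_ON_ONCE() in rcu_do_batch() tests the ->offloaded flag directly,
since it only needs to fire if an offloaded ->cblist ever reaches that
function.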
Signed-off-by: Paul E. McKenney <paulmck@...ux.ibm.com>
---
kernel/rcu/tree.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 6f5c96c4f9a3..969ba292a669 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -210,7 +210,8 @@ static long rcu_get_n_cbs_cpu(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 
-	if (rcu_segcblist_is_enabled(&rdp->cblist)) /* Online normal CPU? */
+	if (rcu_segcblist_is_enabled(&rdp->cblist) &&
+	    !rcu_segcblist_is_offloaded(&rdp->cblist)) /* Online normal CPU? */
 		return rcu_segcblist_n_cbs(&rdp->cblist);
 	return rcu_get_n_cbs_nocb_cpu(rdp); /* Works for offline, too. */
 }
@@ -2081,6 +2082,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
 	long bl, count;
 
+	WARN_ON_ONCE(rdp->cblist.offloaded);
 	/* If no callbacks are ready, just return. */
 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
 		trace_rcu_batch_start(rcu_state.name,
@@ -2299,7 +2301,8 @@ static __latent_entropy void rcu_core(void)
 
 	/* No grace period and unregistered callbacks? */
 	if (!rcu_gp_in_progress() &&
-	    rcu_segcblist_is_enabled(&rdp->cblist)) {
+	    rcu_segcblist_is_enabled(&rdp->cblist) &&
+	    !rcu_segcblist_is_offloaded(&rdp->cblist)) {
 		local_irq_save(flags);
 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
 			rcu_accelerate_cbs_unlocked(rnp, rdp);
@@ -2514,7 +2517,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
 	rdp = this_cpu_ptr(&rcu_data);
 
 	/* Add the callback to our list. */
-	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) {
+	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) ||
+	    rcu_segcblist_is_offloaded(&rdp->cblist) || cpu != -1) {
 		int offline;
 
 		if (cpu != -1)
@@ -2750,6 +2754,7 @@ static int rcu_pending(void)
 	/* Has RCU gone idle with this CPU needing another grace period? */
 	if (!rcu_gp_in_progress() &&
 	    rcu_segcblist_is_enabled(&rdp->cblist) &&
+	    !rcu_segcblist_is_offloaded(&rdp->cblist) &&
 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
 		return 1;
--
2.17.1