Message-Id: <20220204233902.1902-6-paulmck@kernel.org>
Date: Fri, 4 Feb 2022 15:38:49 -0800
From: "Paul E. McKenney" <paulmck@...nel.org>
To: rcu@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, kernel-team@...com,
rostedt@...dmis.org, "Paul E. McKenney" <paulmck@...nel.org>
Subject: [PATCH rcu 06/19] srcu: Add size-state transitioning code

This is just dead code at the moment, but it serves to prevent
spurious compiler warnings about init_srcu_struct_nodes() being
unused. This function will be used again once the state-transition
code is activated.
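
As a reading aid, the transition step added to srcu_gp_end() below
amounts to the following sketch (the identifiers are those used in
this patch; the helper's name and placement are hypothetical):

	/* Sketch only: advance the size state by one step per GP. */
	static void srcu_size_state_step(struct srcu_struct *ssp)
	{
		int ss_state = smp_load_acquire(&ssp->srcu_size_state);

		if (!ss_state || ss_state == SRCU_SIZE_BIG)
			return; /* Not transitioning, or already big. */
		if (ss_state == SRCU_SIZE_ALLOC)
			init_srcu_struct_nodes(ssp); /* Build srcu_node tree. */
		else
			/* Otherwise, advance one state per grace period. */
			smp_store_release(&ssp->srcu_size_state, ss_state + 1);
	}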

Because srcu_barrier() must become aware of the transition before
call_srcu() does, the state machine waits for an SRCU grace period
before callbacks are queued to the non-CPU-0 queues. This requires
that portions of srcu_barrier() be enclosed in an SRCU read-side
critical section.
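
Concretely, the read-side bracketing looks like the following sketch,
a commented restatement of the srcu_barrier() hunk below (all
identifiers appear in this patch):

	idx = srcu_read_lock(ssp); /* Hold off the transition's GP. */
	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER) {
		/* Not yet transitioned: callbacks still funnel through CPU 0. */
		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
	} else {
		/* Far enough along: any CPU's queue may hold callbacks. */
		for_each_possible_cpu(cpu)
			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
	}
	srcu_read_unlock(ssp, idx);
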
Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
---
kernel/rcu/srcutree.c | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 2bbe8a5d9ae86..fce041b648ee3 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -202,7 +202,6 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
if (!ssp->sda)
return -ENOMEM;
init_srcu_struct_data(ssp);
- WARN_ON_ONCE(!init_srcu_struct_nodes(ssp));
ssp->srcu_gp_seq_needed_exp = 0;
ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
@@ -555,6 +554,7 @@ static void srcu_gp_end(struct srcu_struct *ssp)
unsigned long mask;
struct srcu_data *sdp;
struct srcu_node *snp;
+ int ss_state;

/* Prevent more than one additional grace period. */
mutex_lock(&ssp->srcu_cb_mutex);
@@ -622,6 +622,15 @@ static void srcu_gp_end(struct srcu_struct *ssp)
} else {
spin_unlock_irq_rcu_node(ssp);
}
+
+ /* Transition to big if needed. */
+ ss_state = smp_load_acquire(&ssp->srcu_size_state);
+ if (ss_state && ss_state != SRCU_SIZE_BIG) {
+ if (ss_state == SRCU_SIZE_ALLOC)
+ init_srcu_struct_nodes(ssp);
+ else
+ smp_store_release(&ssp->srcu_size_state, ss_state + 1);
+ }
}

/*
@@ -1172,6 +1181,7 @@ static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
void srcu_barrier(struct srcu_struct *ssp)
{
int cpu;
+ int idx;
unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

check_init_srcu_struct(ssp);
@@ -1187,11 +1197,13 @@ void srcu_barrier(struct srcu_struct *ssp)
/* Initial count prevents reaching zero until all CBs are posted. */
atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

+ idx = srcu_read_lock(ssp);
if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
else
for_each_possible_cpu(cpu)
srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
+ srcu_read_unlock(ssp, idx);

/* Remove the initial count, at which point reaching zero can happen. */
if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
--
2.31.1.189.g2e36527f23