Message-Id: <1307561407-13809-9-git-send-email-paulmck@linux.vnet.ibm.com>
Date: Wed, 8 Jun 2011 12:29:48 -0700
From: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
To: linux-kernel@...r.kernel.org
Cc: mingo@...e.hu, laijs@...fujitsu.com, dipankar@...ibm.com,
akpm@...ux-foundation.org, mathieu.desnoyers@...ymtl.ca,
josh@...htriplett.org, niv@...ibm.com, tglx@...utronix.de,
peterz@...radead.org, rostedt@...dmis.org, Valdis.Kletnieks@...edu,
dhowells@...hat.com, eric.dumazet@...il.com, darren@...art.com,
patches@...aro.org, "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
Subject: [PATCH tip/core/rcu 09/28] rcu: Catch rcutorture up to new RCU API additions

Now that the RCU API contains synchronize_rcu_bh(), synchronize_sched(),
call_rcu_sched(), and synchronize_rcu_bh_expedited(), catch rcutorture
up to them.

Make rcutorture test synchronize_rcu_bh(), getting rid of the old
rcu_bh_torture_synchronize() workaround. Similarly, make rcutorture test
synchronize_sched(), getting rid of the old sched_torture_synchronize()
workaround. Make rcutorture test call_rcu_sched() instead of wrapping
synchronize_sched(). Also add testing of synchronize_rcu_bh_expedited().
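
For readers new to rcutorture's ops tables, here is a minimal, purely
illustrative userspace sketch (the demo_* names are invented for this
note and are not kernel code) of why the old wrappers can simply go
away: the .sync member is just an argumentless, void-returning function
pointer, so a primitive such as synchronize_rcu_bh() or
synchronize_sched() can be assigned to it directly once it exists as a
real function with that signature.

#include <stdio.h>

struct demo_torture_ops {
	void (*sync)(void);	/* synchronous grace-period wait */
	const char *name;
};

/* Stand-in for an exported primitive such as synchronize_rcu_bh(). */
static void demo_synchronize_bh(void)
{
	printf("waiting for a grace period\n");
}

/* Old style: a wrapper that only forwards to the real primitive. */
static void demo_torture_synchronize(void)
{
	demo_synchronize_bh();
}

static struct demo_torture_ops old_ops = {
	.sync = demo_torture_synchronize,	/* extra indirection */
	.name = "old",
};

static struct demo_torture_ops new_ops = {
	.sync = demo_synchronize_bh,		/* point at the API directly */
	.name = "new",
};

static void run(struct demo_torture_ops *ops)
{
	printf("%s ops: ", ops->name);
	ops->sync();
}

int main(void)
{
	run(&old_ops);
	run(&new_ops);
	return 0;
}
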
Signed-off-by: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
---
kernel/rcutorture.c | 55 +++++++++++++++++++-------------------------------
1 file changed, 21 insertions(+), 34 deletions(-)
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 40d9ed2..f736fe3 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -480,30 +480,6 @@ static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}
-struct rcu_bh_torture_synchronize {
- struct rcu_head head;
- struct completion completion;
-};
-
-static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
-{
- struct rcu_bh_torture_synchronize *rcu;
-
- rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
- complete(&rcu->completion);
-}
-
-static void rcu_bh_torture_synchronize(void)
-{
- struct rcu_bh_torture_synchronize rcu;
-
- init_rcu_head_on_stack(&rcu.head);
- init_completion(&rcu.completion);
- call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
- wait_for_completion(&rcu.completion);
- destroy_rcu_head_on_stack(&rcu.head);
-}
-
static struct rcu_torture_ops rcu_bh_ops = {
.init = NULL,
.cleanup = NULL,
@@ -512,7 +488,7 @@ static struct rcu_torture_ops rcu_bh_ops = {
.readunlock = rcu_bh_torture_read_unlock,
.completed = rcu_bh_torture_completed,
.deferred_free = rcu_bh_torture_deferred_free,
- .sync = rcu_bh_torture_synchronize,
+ .sync = synchronize_rcu_bh,
.cb_barrier = rcu_barrier_bh,
.fqs = rcu_bh_force_quiescent_state,
.stats = NULL,
@@ -528,7 +504,7 @@ static struct rcu_torture_ops rcu_bh_sync_ops = {
.readunlock = rcu_bh_torture_read_unlock,
.completed = rcu_bh_torture_completed,
.deferred_free = rcu_sync_torture_deferred_free,
- .sync = rcu_bh_torture_synchronize,
+ .sync = synchronize_rcu_bh,
.cb_barrier = NULL,
.fqs = rcu_bh_force_quiescent_state,
.stats = NULL,
@@ -536,6 +512,22 @@ static struct rcu_torture_ops rcu_bh_sync_ops = {
.name = "rcu_bh_sync"
};
+static struct rcu_torture_ops rcu_bh_expedited_ops = {
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = rcu_bh_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = rcu_bh_torture_read_unlock,
+ .completed = rcu_bh_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = synchronize_rcu_bh_expedited,
+ .cb_barrier = NULL,
+ .fqs = rcu_bh_force_quiescent_state,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "rcu_bh_expedited"
+};
+
/*
* Definitions for srcu torture testing.
*/
@@ -659,11 +651,6 @@ static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}
-static void sched_torture_synchronize(void)
-{
- synchronize_sched();
-}
-
static struct rcu_torture_ops sched_ops = {
.init = rcu_sync_torture_init,
.cleanup = NULL,
@@ -672,7 +659,7 @@ static struct rcu_torture_ops sched_ops = {
.readunlock = sched_torture_read_unlock,
.completed = rcu_no_completed,
.deferred_free = rcu_sched_torture_deferred_free,
- .sync = sched_torture_synchronize,
+ .sync = synchronize_sched,
.cb_barrier = rcu_barrier_sched,
.fqs = rcu_sched_force_quiescent_state,
.stats = NULL,
@@ -688,7 +675,7 @@ static struct rcu_torture_ops sched_sync_ops = {
.readunlock = sched_torture_read_unlock,
.completed = rcu_no_completed,
.deferred_free = rcu_sync_torture_deferred_free,
- .sync = sched_torture_synchronize,
+ .sync = synchronize_sched,
.cb_barrier = NULL,
.fqs = rcu_sched_force_quiescent_state,
.stats = NULL,
@@ -1427,7 +1414,7 @@ rcu_torture_init(void)
int firsterr = 0;
static struct rcu_torture_ops *torture_ops[] =
{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
- &rcu_bh_ops, &rcu_bh_sync_ops,
+ &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
&srcu_ops, &srcu_expedited_ops,
&sched_ops, &sched_sync_ops, &sched_expedited_ops, };
--
1.7.3.2