Message-Id: <1444149934-16764-6-git-send-email-paulmck@linux.vnet.ibm.com>
Date: Tue, 6 Oct 2015 09:45:27 -0700
From: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
To: linux-kernel@...r.kernel.org
Cc: mingo@...nel.org, jiangshanlai@...il.com, dipankar@...ibm.com,
akpm@...ux-foundation.org, mathieu.desnoyers@...icios.com,
josh@...htriplett.org, tglx@...utronix.de, peterz@...radead.org,
rostedt@...dmis.org, dhowells@...hat.com, edumazet@...gle.com,
dvhart@...ux.intel.com, fweisbec@...il.com, oleg@...hat.com,
bobby.prani@...il.com,
"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
Subject: [PATCH tip/core/rcu 06/13] rcu_sync: Simplify rcu_sync using new rcu_sync_ops structure

From: Oleg Nesterov <oleg@...hat.com>

This commit adds a new rcu_sync_ops structure holding the sync/call
methods and replaces the per-instance function pointers in struct
rcu_sync with a gp_type enum that indexes a static array of these ops.
This simplifies the "init" helpers by collapsing a switch statement and
multiple explicit definitions into a simple assignment and a helper
macro, respectively.

Signed-off-by: Oleg Nesterov <oleg@...hat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Signed-off-by: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
---
 include/linux/rcu_sync.h | 60 +++++++++++++++++++-----------------------------
 kernel/rcu/sync.c        | 42 +++++++++++++++++----------------
2 files changed, 45 insertions(+), 57 deletions(-)
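
For anyone skimming the diff, the core of the change is replacing
per-instance sync/call function pointers with a type enum that indexes
a single shared const ops table.  A minimal stand-alone sketch of that
pattern (made-up names, user-space code, not part of the patch):

/* Sketch: enum-indexed ops table instead of per-object function pointers. */
#include <stdio.h>

enum demo_type { DEMO_A, DEMO_B };

static void sync_a(void) { printf("sync A\n"); }
static void sync_b(void) { printf("sync B\n"); }

/* One shared, read-only table; objects only store an index into it. */
static const struct {
	void (*sync)(void);
} demo_ops[] = {
	[DEMO_A] = { .sync = sync_a },
	[DEMO_B] = { .sync = sync_b },
};

struct demo {
	enum demo_type type;	/* replaces the old ->sync/->call pointers */
};

static void demo_init(struct demo *d, enum demo_type type)
{
	d->type = type;		/* was: a switch assigning each pointer */
}

int main(void)
{
	struct demo d;

	demo_init(&d, DEMO_B);
	demo_ops[d.type].sync();	/* was: d.sync(); prints "sync B" */
	return 0;
}

Each instance shrinks by two pointers and the table itself can live in
read-only memory.
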
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index cb044df2e21c..c6d2272c4459 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -26,6 +26,8 @@
#include <linux/wait.h>
#include <linux/rcupdate.h>
+enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC };
+
/* Structure to mediate between updaters and fastpath-using readers. */
struct rcu_sync {
int gp_state;
@@ -35,43 +37,9 @@ struct rcu_sync {
int cb_state;
struct rcu_head cb_head;
- void (*sync)(void);
- void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
+ enum rcu_sync_type gp_type;
};
-#define ___RCU_SYNC_INIT(name) \
- .gp_state = 0, \
- .gp_count = 0, \
- .gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \
- .cb_state = 0
-
-#define __RCU_SCHED_SYNC_INIT(name) { \
- ___RCU_SYNC_INIT(name), \
- .sync = synchronize_sched, \
- .call = call_rcu_sched, \
-}
-
-#define __RCU_BH_SYNC_INIT(name) { \
- ___RCU_SYNC_INIT(name), \
- .sync = synchronize_rcu_bh, \
- .call = call_rcu_bh, \
-}
-
-#define __RCU_SYNC_INIT(name) { \
- ___RCU_SYNC_INIT(name), \
- .sync = synchronize_rcu, \
- .call = call_rcu, \
-}
-
-#define DEFINE_RCU_SCHED_SYNC(name) \
- struct rcu_sync name = __RCU_SCHED_SYNC_INIT(name)
-
-#define DEFINE_RCU_BH_SYNC(name) \
- struct rcu_sync name = __RCU_BH_SYNC_INIT(name)
-
-#define DEFINE_RCU_SYNC(name) \
- struct rcu_sync name = __RCU_SYNC_INIT(name)
-
/**
* rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
* @rsp: Pointer to rcu_sync structure to use for synchronization
@@ -85,10 +53,28 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
return !rsp->gp_state; /* GP_IDLE */
}
-enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC };
-
extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
extern void rcu_sync_enter(struct rcu_sync *);
extern void rcu_sync_exit(struct rcu_sync *);
+#define __RCU_SYNC_INITIALIZER(name, type) { \
+ .gp_state = 0, \
+ .gp_count = 0, \
+ .gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \
+ .cb_state = 0, \
+ .gp_type = type, \
+ }
+
+#define __DEFINE_RCU_SYNC(name, type) \
+	struct rcu_sync name = __RCU_SYNC_INITIALIZER(name, type)
+
+#define DEFINE_RCU_SYNC(name) \
+ __DEFINE_RCU_SYNC(name, RCU_SYNC)
+
+#define DEFINE_RCU_SCHED_SYNC(name) \
+ __DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC)
+
+#define DEFINE_RCU_BH_SYNC(name) \
+ __DEFINE_RCU_SYNC(name, RCU_BH_SYNC)
+
#endif /* _LINUX_RCU_SYNC_H_ */
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 0a11df43be23..5a9aa4c394f1 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -23,6 +23,24 @@
#include <linux/rcu_sync.h>
#include <linux/sched.h>
+static const struct {
+ void (*sync)(void);
+ void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
+} gp_ops[] = {
+ [RCU_SYNC] = {
+ .sync = synchronize_rcu,
+ .call = call_rcu,
+ },
+ [RCU_SCHED_SYNC] = {
+ .sync = synchronize_sched,
+ .call = call_rcu_sched,
+ },
+ [RCU_BH_SYNC] = {
+ .sync = synchronize_rcu_bh,
+ .call = call_rcu_bh,
+ },
+};
+
enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
@@ -37,23 +55,7 @@ void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
{
memset(rsp, 0, sizeof(*rsp));
init_waitqueue_head(&rsp->gp_wait);
-
- switch (type) {
- case RCU_SYNC:
- rsp->sync = synchronize_rcu;
- rsp->call = call_rcu;
- break;
-
- case RCU_SCHED_SYNC:
- rsp->sync = synchronize_sched;
- rsp->call = call_rcu_sched;
- break;
-
- case RCU_BH_SYNC:
- rsp->sync = synchronize_rcu_bh;
- rsp->call = call_rcu_bh;
- break;
- }
+ rsp->gp_type = type;
}
/**
@@ -85,7 +87,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
BUG_ON(need_wait && need_sync);
if (need_sync) {
- rsp->sync();
+ gp_ops[rsp->gp_type].sync();
rsp->gp_state = GP_PASSED;
wake_up_all(&rsp->gp_wait);
} else if (need_wait) {
@@ -138,7 +140,7 @@ static void rcu_sync_func(struct rcu_head *rcu)
* to catch a later GP.
*/
rsp->cb_state = CB_PENDING;
- rsp->call(&rsp->cb_head, rcu_sync_func);
+ gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
} else {
/*
* We're at least a GP after rcu_sync_exit(); everybody will now
@@ -166,7 +168,7 @@ void rcu_sync_exit(struct rcu_sync *rsp)
if (!--rsp->gp_count) {
if (rsp->cb_state == CB_IDLE) {
rsp->cb_state = CB_PENDING;
- rsp->call(&rsp->cb_head, rcu_sync_func);
+ gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
} else if (rsp->cb_state == CB_PENDING) {
rsp->cb_state = CB_REPLAY;
}
--
2.5.2
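
For context, a sketch of what a caller of the reworked interface might
look like after this patch (hypothetical user "my_rss"; locking and the
reader slow path are omitted):

#include <linux/rcu_sync.h>

static struct rcu_sync my_rss;	/* or, for statics: DEFINE_RCU_SCHED_SYNC(my_rss); */

static void my_subsys_init(void)
{
	/* The enum selects the gp_ops[] entry; no pointers stored per instance. */
	rcu_sync_init(&my_rss, RCU_SCHED_SYNC);
}

/* Reader: may take its fastpath only while no updater holds the sync. */
static bool my_reader_fastpath_allowed(void)
{
	return rcu_sync_is_idle(&my_rss);
}

/* Updater: force readers onto their slow paths for the duration. */
static void my_update(void)
{
	rcu_sync_enter(&my_rss);	/* may block for a grace period */
	/* ... update that reader slow paths must observe ... */
	rcu_sync_exit(&my_rss);		/* fastpaths re-enabled after a later GP */
}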