Date:	Thu, 19 Mar 2015 17:36:36 -0400
From:	Steven Rostedt <rostedt@...dmis.org>
To:	Andrew Morton <akpm@...ux-foundation.org>
Cc:	Peter Zijlstra <peterz@...radead.org>, mingo@...nel.org,
	rusty@...tcorp.com.au, mathieu.desnoyers@...icios.com,
	oleg@...hat.com, paulmck@...ux.vnet.ibm.com,
	torvalds@...ux-foundation.org, linux-kernel@...r.kernel.org,
	andi@...stfloor.org, tglx@...utronix.de,
	Michel Lespinasse <walken@...gle.com>,
	Andrea Arcangeli <aarcange@...hat.com>,
	David Woodhouse <David.Woodhouse@...el.com>,
	Rik van Riel <riel@...hat.com>
Subject: Re: [PATCH 6/8] rbtree: Implement generic latch_tree

On Thu, 19 Mar 2015 13:58:33 -0700
Andrew Morton <akpm@...ux-foundation.org> wrote:

> OK.  This code is basically required to support perf/ftrace and
> modules, yes?  Presumably small and space-constrained systems aren't
> using either, so they don't take the hit.
> 
> However CONFIG_MODULES systems which aren't using perf/ftrace _do_ take
> a hit.  How many systems are we talking here?  All non-x86?

Compromise... (Totally untested)
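
The idea, roughly: the function bodies move into <linux/rbtree_latch_code.h>,
which rbtree_latch.h includes as static __always_inline when
CONFIG_RBTREE_LATCH_INLINE is set (PERF_EVENTS || TRACING), and which
lib/rbtree_latch.c includes with an empty annotation otherwise, so configs
with modules but without perf/ftrace get one out-of-line copy instead of an
inlined copy at every call site. A minimal stand-alone sketch of that
include-twice pattern, with made-up names (mylib_*), just to illustrate:

/* mylib_code.h -- the single copy of the function bodies */
MYLIB_ANNOTATE int
mylib_add(int a, int b)
{
	return a + b;
}

/* mylib.h -- decides how those bodies get emitted */
#ifndef MYLIB_H
#define MYLIB_H

#ifdef MYLIB_INLINE
#define MYLIB_ANNOTATE static inline	/* every includer gets an inline copy */
#include "mylib_code.h"
#else
int mylib_add(int a, int b);		/* the one copy lives in mylib.c */
#endif

#endif /* MYLIB_H */

/* mylib.c -- emits the out-of-line copy only when not inlining */
#include "mylib.h"

#ifndef MYLIB_INLINE
#define MYLIB_ANNOTATE			/* empty: plain external definitions */
#include "mylib_code.h"
#endif

Build with -DMYLIB_INLINE and every caller inlines the body; build without it
and mylib.c provides the one shared definition. Same trade-off as the patch
below, just selected via Kconfig instead of a -D flag.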

-- Steve

diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
index bd4a9700c06c..e22f758ca433 100644
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -74,150 +74,24 @@ struct latch_tree_ops {
 	int  (*comp)(void *key,                 struct latch_tree_node *b);
 };
 
-static __always_inline void
-__lt_insert(struct latch_tree_node *ltn, struct rb_root *root,
-	    bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b))
-{
-	struct rb_node **link = &root->rb_node;
-	struct rb_node *parent = NULL;
-	struct latch_tree_node *ltp;
-
-	while (*link) {
-		parent = *link;
-		ltp = container_of(parent, struct latch_tree_node, node);
-
-		if (less(ltn, ltp))
-			link = &parent->rb_left;
-		else
-			link = &parent->rb_right;
-	}
-
-	rb_link_node_rcu(&ltn->node, parent, link);
-	rb_insert_color(&ltn->node, root);
-}
-
-static __always_inline void
-__lt_erase(struct latch_tree_node *ltn, struct rb_root *root)
-{
-	rb_erase(&ltn->node, root);
-}
-
-static __always_inline struct latch_tree_node *
-__lt_find(void *key, struct rb_root *root,
-	  int (*comp)(void *key, struct latch_tree_node *ltn))
-{
-	struct rb_node *n = rcu_dereference_raw(root->rb_node);
-	struct latch_tree_node *ltn;
-	int c;
-
-	while (n) {
-		ltn = container_of(n, struct latch_tree_node, node);
-		c = comp(key, ltn);
-
-		if (c < 0)
-			n = rcu_dereference_raw(n->rb_left);
-		else if (c > 0)
-			n = rcu_dereference_raw(n->rb_right);
-		else
-			return ltn;
-	}
-
-	return NULL;
-}
-
-/**
- * latch_tree_insert() - insert @nodes into the trees @root
- * @nodes: nodes to insert
- * @root: trees to insert @nodes into
- * @priv: pointer to node private data
- * @ops: operators defining the node order
- *
- * Initializes @nodes private pointer with @priv; which typically is a back
- * pointer to the containing structure, used by @ops to find the search key.
- *
- * Then it inserts @nodes into @root in an ordered fashion such that we can
- * always observe one complete tree. See the comment for
- * raw_write_seqcount_latch().
- *
- * The inserts use rcu_assign_pointer() to publish the element such that
- * both the @priv pointer values in @nodes as the tree structure is stored
- * before we can observe the new @nodes.
- *
- * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be
- * serialized.
- */
-static __always_inline void
-latch_tree_insert(struct latch_tree_nodes *nodes,
-		  struct latch_tree_root *root,
-		  void *priv,
-		  const struct latch_tree_ops *ops)
-{
-	nodes->node[0].priv = nodes->node[1].priv = priv;
-
-	raw_write_seqcount_latch(&root->seq);
-	__lt_insert(&nodes->node[0], &root->tree[0], ops->less);
-	raw_write_seqcount_latch(&root->seq);
-	__lt_insert(&nodes->node[1], &root->tree[1], ops->less);
-}
-
-/**
- * latch_tree_erase() - removes @nodes from the trees @root
- * @nodes: nodes to remote
- * @root: trees to remove @nodes from
- * @ops: operators defining the node order
- *
- * Removes @nodes from the trees @root in an ordered fashion such that we can
- * always observe one complete tree. See the comment for
- * raw_write_seqcount_latch().
- *
- * It is assumed that @nodes will observe one RCU quiesent state before being
- * reused of freed.
- *
- * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be
- * serialized.
- */
-static __always_inline void
-latch_tree_erase(struct latch_tree_nodes *nodes,
-		 struct latch_tree_root *root,
-		 const struct latch_tree_ops *ops)
-{
-	raw_write_seqcount_latch(&root->seq);
-	__lt_erase(&nodes->node[0], &root->tree[0]);
-	raw_write_seqcount_latch(&root->seq);
-	__lt_erase(&nodes->node[1], &root->tree[1]);
-}
-
-/**
- * latch_tree_find() - find the node matching @key in the trees @root
- * @key: search key
- * @root: trees to search for @key
- * @ops: operators defining the node order
- *
- * Does a lockless lookup in the trees @root for the node matching @key.
- *
- * It is assumed that this is called while holding the appropriate RCU read
- * side lock.
- *
- * If the operators define a partial order on the elements (there are multiple
- * elements which have the same key value) it is undefined which of these
- * elements will be found. Nor is it possible to iterate the tree to find
- * further elements with the same key value.
- *
- * Returns: a pointer to the node matching @key or NULL.
- */
-static __always_inline struct latch_tree_node *
-latch_tree_find(void *key, struct latch_tree_root *root,
-		const struct latch_tree_ops *ops)
-{
-	struct latch_tree_node *node;
-	unsigned int seq;
-
-	do {
-		seq = raw_read_seqcount(&root->seq);
-		node = __lt_find(key, &root->tree[seq & 1], ops->comp);
-	} while (read_seqcount_retry(&root->seq, seq));
-
-	return node;
-}
+#ifdef CONFIG_RBTREE_LATCH_INLINE
+#define RBTREE_LATCH_ANNOTATE static __always_inline
+#include <linux/rbtree_latch_code.h>
+#else
+void __lt_insert(struct latch_tree_node *ltn, struct rb_root *root,
+	bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b));
+void __lt_erase(struct latch_tree_node *ltn, struct rb_root *root);
+struct latch_tree_node *__lt_find(void *key, struct rb_root *root,
+	  int (*comp)(void *key, struct latch_tree_node *ltn));
+void latch_tree_insert(struct latch_tree_nodes *nodes,
+		       struct latch_tree_root *root,
+		       void *priv,
+		       const struct latch_tree_ops *ops);
+void latch_tree_erase(struct latch_tree_nodes *nodes,
+		      struct latch_tree_root *root,
+		      const struct latch_tree_ops *ops);
+struct latch_tree_node *latch_tree_find(void *key, struct latch_tree_root *root,
+					const struct latch_tree_ops *ops);
+#endif
 
 #endif /* RB_TREE_LATCH_H */
diff --git a/include/linux/rbtree_latch_code.h b/include/linux/rbtree_latch_code.h
new file mode 100644
index 000000000000..940fd763ee97
--- /dev/null
+++ b/include/linux/rbtree_latch_code.h
@@ -0,0 +1,146 @@
+
+RBTREE_LATCH_ANNOTATE void
+__lt_insert(struct latch_tree_node *ltn, struct rb_root *root,
+	    bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b))
+{
+	struct rb_node **link = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct latch_tree_node *ltp;
+
+	while (*link) {
+		parent = *link;
+		ltp = container_of(parent, struct latch_tree_node, node);
+
+		if (less(ltn, ltp))
+			link = &parent->rb_left;
+		else
+			link = &parent->rb_right;
+	}
+
+	rb_link_node_rcu(&ltn->node, parent, link);
+	rb_insert_color(&ltn->node, root);
+}
+
+RBTREE_LATCH_ANNOTATE void
+__lt_erase(struct latch_tree_node *ltn, struct rb_root *root)
+{
+	rb_erase(&ltn->node, root);
+}
+
+RBTREE_LATCH_ANNOTATE struct latch_tree_node *
+__lt_find(void *key, struct rb_root *root,
+	  int (*comp)(void *key, struct latch_tree_node *ltn))
+{
+	struct rb_node *n = rcu_dereference_raw(root->rb_node);
+	struct latch_tree_node *ltn;
+	int c;
+
+	while (n) {
+		ltn = container_of(n, struct latch_tree_node, node);
+		c = comp(key, ltn);
+
+		if (c < 0)
+			n = rcu_dereference_raw(n->rb_left);
+		else if (c > 0)
+			n = rcu_dereference_raw(n->rb_right);
+		else
+			return ltn;
+	}
+
+	return NULL;
+}
+
+/**
+ * latch_tree_insert() - insert @nodes into the trees @root
+ * @nodes: nodes to insert
+ * @root: trees to insert @nodes into
+ * @priv: pointer to node private data
+ * @ops: operators defining the node order
+ *
+ * Initializes the @nodes private pointers with @priv, which typically is a back
+ * pointer to the containing structure, used by @ops to find the search key.
+ *
+ * Then it inserts @nodes into @root in an ordered fashion such that we can
+ * always observe one complete tree. See the comment for
+ * raw_write_seqcount_latch().
+ *
+ * The inserts use rcu_assign_pointer() to publish the element such that
+ * both the @priv pointer values in @nodes and the tree structure are stored
+ * before we can observe the new @nodes.
+ *
+ * All modifications (latch_tree_insert, latch_tree_erase) are assumed to be
+ * serialized.
+ */
+RBTREE_LATCH_ANNOTATE void
+latch_tree_insert(struct latch_tree_nodes *nodes,
+		  struct latch_tree_root *root,
+		  void *priv,
+		  const struct latch_tree_ops *ops)
+{
+	nodes->node[0].priv = nodes->node[1].priv = priv;
+
+	raw_write_seqcount_latch(&root->seq);
+	__lt_insert(&nodes->node[0], &root->tree[0], ops->less);
+	raw_write_seqcount_latch(&root->seq);
+	__lt_insert(&nodes->node[1], &root->tree[1], ops->less);
+}
+
+/**
+ * latch_tree_erase() - removes @nodes from the trees @root
+ * @nodes: nodes to remove
+ * @root: trees to remove @nodes from
+ * @ops: operators defining the node order
+ *
+ * Removes @nodes from the trees @root in an ordered fashion such that we can
+ * always observe one complete tree. See the comment for
+ * raw_write_seqcount_latch().
+ *
+ * It is assumed that @nodes will observe one RCU quiescent state before being
+ * reused or freed.
+ *
+ * All modifications (latch_tree_insert, latch_tree_erase) are assumed to be
+ * serialized.
+ */
+RBTREE_LATCH_ANNOTATE void
+latch_tree_erase(struct latch_tree_nodes *nodes,
+		 struct latch_tree_root *root,
+		 const struct latch_tree_ops *ops)
+{
+	raw_write_seqcount_latch(&root->seq);
+	__lt_erase(&nodes->node[0], &root->tree[0]);
+	raw_write_seqcount_latch(&root->seq);
+	__lt_erase(&nodes->node[1], &root->tree[1]);
+}
+
+/**
+ * latch_tree_find() - find the node matching @key in the trees @root
+ * @key: search key
+ * @root: trees to search for @key
+ * @ops: operators defining the node order
+ *
+ * Does a lockless lookup in the trees @root for the node matching @key.
+ *
+ * It is assumed that this is called while holding the appropriate RCU read
+ * side lock.
+ *
+ * If the operators define a partial order on the elements (there are multiple
+ * elements which have the same key value) it is undefined which of these
+ * elements will be found. Nor is it possible to iterate the tree to find
+ * further elements with the same key value.
+ *
+ * Returns: a pointer to the node matching @key or NULL.
+ */
+RBTREE_LATCH_ANNOTATE struct latch_tree_node *
+latch_tree_find(void *key, struct latch_tree_root *root,
+		const struct latch_tree_ops *ops)
+{
+	struct latch_tree_node *node;
+	unsigned int seq;
+
+	do {
+		seq = raw_read_seqcount(&root->seq);
+		node = __lt_find(key, &root->tree[seq & 1], ops->comp);
+	} while (read_seqcount_retry(&root->seq, seq));
+
+	return node;
+}
diff --git a/lib/Kconfig b/lib/Kconfig
index 87da53bb1fef..6d515f8d8fb5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -527,4 +527,8 @@ source "lib/fonts/Kconfig"
 config ARCH_HAS_SG_CHAIN
 	def_bool n
 
+config RBTREE_LATCH_INLINE
+	def_bool y
+	depends on PERF_EVENTS || TRACING
+
 endmenu
diff --git a/lib/Makefile b/lib/Makefile
index 58f74d2dd396..3dfb4f697239 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 sha1.o md5.o irq_regs.o argv_split.o \
 	 proportions.o flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-	 earlycpio.o seq_buf.o
+	 earlycpio.o seq_buf.o rbtree_latch.o
 
 obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 lib-$(CONFIG_MMU) += ioremap.o
diff --git a/lib/rbtree_latch.c b/lib/rbtree_latch.c
new file mode 100644
index 000000000000..13867da4f27f
--- /dev/null
+++ b/lib/rbtree_latch.c
@@ -0,0 +1,6 @@
+
+#ifndef CONFIG_RBTREE_LATCH_INLINE
+#define RBTREE_LATCH_ANNOTATE
+#include <linux/rbtree_latch.h>		/* struct definitions and prototypes */
+#include <linux/rbtree_latch_code.h>
+#endif
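
For completeness, a rough sketch of how a user of this API might wire up the
ops and the latch -- not part of the patch; my_region, my_root etc. are
made-up names, loosely along the lines of the module address lookup this
series is aimed at:

/* An address-range lookup that readers can do locklessly. */
struct my_region {
	unsigned long		addr;
	unsigned long		size;
	struct latch_tree_nodes	ltn;
};

static __always_inline struct my_region *region_of(struct latch_tree_node *n)
{
	return n->priv;	/* back pointer filled in by latch_tree_insert() */
}

static __always_inline bool my_less(struct latch_tree_node *a,
				    struct latch_tree_node *b)
{
	return region_of(a)->addr < region_of(b)->addr;
}

static __always_inline int my_comp(void *key, struct latch_tree_node *n)
{
	unsigned long addr = (unsigned long)key;
	struct my_region *r = region_of(n);

	if (addr < r->addr)
		return -1;
	if (addr >= r->addr + r->size)
		return 1;
	return 0;
}

static const struct latch_tree_ops my_ops = {
	.less = my_less,
	.comp = my_comp,
};

static struct latch_tree_root my_root;

/* writer side, serialized by the caller (e.g. a mutex) */
static void my_insert(struct my_region *r)
{
	latch_tree_insert(&r->ltn, &my_root, r, &my_ops);
}

/* @r must pass through an RCU grace period before being reused or freed */
static void my_remove(struct my_region *r)
{
	latch_tree_erase(&r->ltn, &my_root, &my_ops);
}

/* reader side, called under rcu_read_lock() */
static struct my_region *my_find(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &my_root, &my_ops);
	return n ? region_of(n) : NULL;
}

Readers either retry on the seqcount or see one complete tree, so my_find()
never blocks on the writers.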