Message-ID: <20190117231730.2413466-2-songliubraving@fb.com>
Date: Thu, 17 Jan 2019 15:17:28 -0800
From: Song Liu <songliubraving@...com>
To: <linux-kernel@...r.kernel.org>, <netdev@...r.kernel.org>
CC: Peter Zijlstra <peterz@...radead.org>, <acme@...nel.org>,
<ast@...nel.org>, <daniel@...earbox.net>, <kernel-team@...com>,
Song Liu <songliubraving@...com>
Subject: [PATCH kallsyms, bpf 1/3] rbtree_latch: Introduce latch_tree_first() and latch_tree_next()

From: Peter Zijlstra <peterz@...radead.org>

These two functions will be used by kallsym_tree for dynamic symbols.
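For reference, a rough sketch of the intended read-side iteration pattern is below; struct ksym, ksym_root and walk_ksyms() are made-up names for illustration only and are not part of this patch:

#include <linux/kernel.h>
#include <linux/rbtree_latch.h>
#include <linux/rcupdate.h>

/* Illustrative container type; the real user is the kallsym_tree above. */
struct ksym {
	unsigned long addr;
	struct latch_tree_node tnode;
};

static struct latch_tree_root ksym_root;

/*
 * Walk all entries in sort order.  The whole walk must stay inside a
 * single RCU read-side critical section, as noted in latch_tree_next().
 */
static void walk_ksyms(void (*fn)(struct ksym *sym))
{
	struct latch_tree_node *ltn;

	rcu_read_lock();
	for (ltn = latch_tree_first(&ksym_root); ltn;
	     ltn = latch_tree_next(&ksym_root, ltn))
		fn(container_of(ltn, struct ksym, tnode));
	rcu_read_unlock();
}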
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Signed-off-by: Song Liu <songliubraving@...com>
---
include/linux/rbtree_latch.h | 54 ++++++++++++++++++++++++++++++++++++
1 file changed, 54 insertions(+)
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
index 7d012faa509a..d0001a136d3e 100644
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -211,4 +211,58 @@ latch_tree_find(void *key, struct latch_tree_root *root,
return node;
}
+/**
+ * latch_tree_first() - return the first node in @root per sort order
+ * @root: trees to search
+ *
+ * Does a lockless lookup in the trees @root for the first node.
+ */
+static __always_inline struct latch_tree_node *
+latch_tree_first(struct latch_tree_root *root)
+{
+ struct latch_tree_node *ltn = NULL;
+ struct rb_node *node;
+ unsigned int seq;
+
+ do {
+ struct rb_root *rbr;
+
+ seq = raw_read_seqcount_latch(&root->seq);
+ rbr = &root->tree[seq & 1];
+ node = rb_first(rbr);
+ } while (read_seqcount_retry(&root->seq, seq));
+
+ if (node)
+ ltn = __lt_from_rb(node, seq & 1);
+
+ return ltn;
+}
+
+/**
+ * latch_tree_next() - find the next @ltn in @root per sort order
+ * @root: trees to which @ltn belongs
+ * @ltn: nodes to start from
+ *
+ * Does a lockless lookup in the trees @root for the node after @ltn, or
+ * NULL when @ltn is the last node.
+ *
+ * Using this function outside of the write side lock is rather dodgy but given
+ * latch_tree_erase() doesn't re-init the nodes and the whole iteration is done
+ * under a single RCU critical section, it should be non-fatal and generate some
+ * semblance of order - albeit possibly missing chunks of the tree.
+ */
+static __always_inline struct latch_tree_node *
+latch_tree_next(struct latch_tree_root *root, struct latch_tree_node *ltn)
+{
+ struct rb_node *node;
+ unsigned int seq;
+
+ do {
+ seq = raw_read_seqcount_latch(&root->seq);
+ node = rb_next(&ltn->node[seq & 1]);
+ } while (read_seqcount_retry(&root->seq, seq));
+
+ return node ? __lt_from_rb(node, seq & 1) : NULL;
+}
+
#endif /* RB_TREE_LATCH_H */
--
2.17.1