Subject: rbtree_latch: Add latch_tree_first() and latch_tree_next()
From: Peter Zijlstra
Date: Thu Jan 17 11:41:01 CET 2019

Add lockless iteration primitives to the latch tree: latch_tree_first()
returns the leftmost node per sort order, latch_tree_next() the in-order
successor of a given node.

Signed-off-by: Peter Zijlstra (Intel)
---
 include/linux/rbtree_latch.h | 62 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)

--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -211,4 +211,66 @@ latch_tree_find(void *key, struct latch_
 	return node;
 }
 
+/**
+ * latch_tree_first() - find the first (leftmost) node in @root per sort order
+ * @root: trees to search
+ *
+ * Does a lockless lookup of the leftmost node in the trees @root.
+ *
+ * Returns NULL if the tree is empty.
+ */
+static __always_inline struct latch_tree_node *
+latch_tree_first(struct latch_tree_root *root)
+{
+	struct latch_tree_node *ltn = NULL;
+	struct rb_node *node;
+	unsigned int seq;
+
+	do {
+		struct rb_root *rbr;
+
+		seq = raw_read_seqcount_latch(&root->seq);
+		rbr = &root->tree[seq & 1];
+		node = rb_first(rbr);
+	} while (read_seqcount_retry(&root->seq, seq));
+
+	if (node)
+		ltn = __lt_from_rb(node, seq & 1);
+
+	return ltn;
+}
+
+/**
+ * latch_tree_next() - find the next @ltn in @root per sort order
+ * @root: trees to which @ltn belongs
+ * @ltn: node to start from
+ *
+ * Does a lockless lookup in the trees @root for the next node starting at
+ * @ltn.
+ *
+ * Using this function outside of the write side lock is rather dodgy but given
+ * latch_tree_erase() doesn't re-init the nodes and the whole iteration is done
+ * under a single RCU critical section, it should be non-fatal and generate some
+ * semblance of order - albeit possibly missing chunks of the tree.
+ *
+ * Returns NULL when there is no next node.
+ */
+static __always_inline struct latch_tree_node *
+latch_tree_next(struct latch_tree_root *root, struct latch_tree_node *ltn)
+{
+	struct rb_node *node;
+	unsigned int seq;
+
+	do {
+		seq = raw_read_seqcount_latch(&root->seq);
+		node = rb_next(&ltn->node[seq & 1]);
+	} while (read_seqcount_retry(&root->seq, seq));
+
+	/*
+	 * rb_next() returns NULL past the last node; container_of() on
+	 * NULL would yield a bogus pointer, so return NULL explicitly.
+	 */
+	return node ? __lt_from_rb(node, seq & 1) : NULL;
+}
+
 #endif /* RB_TREE_LATCH_H */