[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20200430010405.GA237715@google.com>
Date: Wed, 29 Apr 2020 18:04:05 -0700
From: Michel Lespinasse <walken@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: linux-kernel@...r.kernel.org, dave@...olabs.net, mingo@...nel.org,
tglx@...utronix.de, oleg@...hat.com, irogers@...gle.com,
juri.lelli@...hat.com, vincent.guittot@...aro.org
Subject: Re: [RFC][PATCH 1/7] rbtree: Add generic add and find helpers
Hi Peter,
On Wed, Apr 29, 2020 at 05:32:59PM +0200, Peter Zijlstra wrote:
> I've always been bothered by the endless (fragile) boilerplate for
> rbtree, and I recently wrote some rbtree helpers for objtool and
> figured I should lift them into the kernel and use them more widely.
>
> Provide:
>
> partial-order; less() based:
> - rb_add(): add a new entry to the rbtree
> - rb_add_cached(): like rb_add(), but for a rb_root_cached
>
> total-order; cmp() based:
> - rb_find(): find an entry in an rbtree
> - rb_find_first(): find the first (leftmost) matching entry
> - rb_next_match(): continue from rb_find_first()
> - rb_for_each(): for loop with rb_find_first() / rb_next_match()
>
> Also make rb_add_cached() / rb_erase_cached() return true when
> leftmost.
>
> Inlining and constant propagation should see the compiler inline the
> whole thing, including the various compare functions.
I really like the idea of this change. Also, I think it opens avenues
for converting some users which had previously been avoiding raw rbtrees
seemingly only because they didn't want to write this boilerplate.
A few questions:
- Adding the rb_add_cached() / rb_erase_cached() return value looks
like it almost belongs to a separate patch. Is this only used in
patch 3/7 (sched/deadline) or did I miss other uses ? Not objecting
to it, but it wasn't obvious to me when reading the patch what the
return value was for.
- Have you considered passing a cmp() function to rb_add() and
rb_add_cached(), and having these test cmp() < 0 rather than less() ?
I figure every user will need to have a cmp() function, so it'd be
nicer if they didn't also need a less() function, if the generated
code is similar (if you checked and rejected it because of bad code,
please just say so).
Reviewed-by: Michel Lespinasse <walken@...gle.com>
I also looked at the other commits in the series, making use of the
helpers, and they seem very reasonable but I did not give them as
thorough a look as this one.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
> ---
> include/linux/rbtree.h | 127 +++++++++++++++++++++++++++++++++++++++++-
> tools/include/linux/rbtree.h | 129 ++++++++++++++++++++++++++++++++++++++++++-
> tools/objtool/elf.c | 63 ++-------------------
> 3 files changed, 257 insertions(+), 62 deletions(-)
>
> --- a/include/linux/rbtree.h
> +++ b/include/linux/rbtree.h
> @@ -141,12 +141,18 @@ static inline void rb_insert_color_cache
> rb_insert_color(node, &root->rb_root);
> }
>
> -static inline void rb_erase_cached(struct rb_node *node,
> +static inline bool rb_erase_cached(struct rb_node *node,
> struct rb_root_cached *root)
> {
> - if (root->rb_leftmost == node)
> + bool leftmost = false;
> +
> + if (root->rb_leftmost == node) {
> root->rb_leftmost = rb_next(node);
> + leftmost = true;
> + }
> rb_erase(node, &root->rb_root);
> +
> + return leftmost;
> }
>
> static inline void rb_replace_node_cached(struct rb_node *victim,
> @@ -158,4 +164,121 @@ static inline void rb_replace_node_cache
> rb_replace_node(victim, new, &root->rb_root);
> }
>
> +static inline bool rb_add_cached(struct rb_root_cached *tree, struct rb_node *node,
> + bool (*less)(struct rb_node *, const struct rb_node *))
> +{
> + struct rb_node **link = &tree->rb_root.rb_node;
> + struct rb_node *parent = NULL;
> + bool leftmost = true;
> +
> + while (*link) {
> + parent = *link;
> + if (less(node, parent)) {
> + link = &parent->rb_left;
> + } else {
> + link = &parent->rb_right;
> + leftmost = false;
> + }
> + }
> +
> + rb_link_node(node, parent, link);
> + rb_insert_color_cached(node, tree, leftmost);
> +
> + return leftmost;
> +}
> +
> +static inline void rb_add(struct rb_root *tree, struct rb_node *node,
> + bool (*less)(struct rb_node *, const struct rb_node *))
> +{
> + struct rb_node **link = &tree->rb_node;
> + struct rb_node *parent = NULL;
> +
> + while (*link) {
> + parent = *link;
> + if (less(node, parent))
> + link = &parent->rb_left;
> + else
> + link = &parent->rb_right;
> + }
> +
> + rb_link_node(node, parent, link);
> + rb_insert_color(node, tree);
> +}
> +
> +static inline struct rb_node *rb_find_add(struct rb_root *tree, struct rb_node *node,
> + int (*cmp)(struct rb_node *, const struct rb_node *))
> +{
> + struct rb_node **link = &tree->rb_node;
> + struct rb_node *parent = NULL;
> + int c;
> +
> + while (*link) {
> + parent = *link;
> + c = cmp(node, parent);
> +
> + if (c < 0)
> + link = &parent->rb_left;
> + else if (c > 0)
> + link = &parent->rb_right;
> + else
> + return parent;
> + }
> +
> + rb_link_node(node, parent, link);
> + rb_insert_color(node, tree);
> + return NULL;
> +}
> +
> +static inline struct rb_node *rb_find(struct rb_root *tree, const void *key,
> + int (*cmp)(const void *key, const struct rb_node *))
> +{
> + struct rb_node *node = tree->rb_node;
> +
> + while (node) {
> + int c = cmp(key, node);
> + if (c < 0) {
> + node = node->rb_left;
> + } else if (c > 0) {
> + node = node->rb_right;
> + } else
> + return node;
> + }
> +
> + return NULL;
> +}
> +
> +static inline struct rb_node *rb_find_first(struct rb_root *tree, const void *key,
> + int (*cmp)(const void *key, const struct rb_node *))
> +{
> + struct rb_node *node = tree->rb_node;
> + struct rb_node *match = NULL;
> +
> + while (node) {
> + int c = cmp(key, node);
> + if (c <= 0) {
> + if (!c)
> + match = node;
> + node = node->rb_left;
> + } else if (c > 0) {
> + node = node->rb_right;
> + }
> + }
> +
> + return match;
> +}
> +
> +static inline struct rb_node *rb_next_match(struct rb_node *node, const void *key,
> + int (*cmp)(const void *key, const struct rb_node *))
> +{
> + node = rb_next(node);
> + if (node && cmp(key, node))
> + node = NULL;
> + return node;
> +}
> +
> +#define rb_for_each(tree, node, key, cmp) \
> + for ((node) = rb_find_first((tree), (key), (cmp)); \
> + (node); (node) = rb_next_match((node), (key), (cmp)))
> +
> +
> #endif /* _LINUX_RBTREE_H */
> --- a/tools/include/linux/rbtree.h
> +++ b/tools/include/linux/rbtree.h
> @@ -135,12 +135,18 @@ static inline void rb_insert_color_cache
> rb_insert_color(node, &root->rb_root);
> }
>
> -static inline void rb_erase_cached(struct rb_node *node,
> +static inline bool rb_erase_cached(struct rb_node *node,
> struct rb_root_cached *root)
> {
> - if (root->rb_leftmost == node)
> + bool leftmost = false;
> +
> + if (root->rb_leftmost == node) {
> root->rb_leftmost = rb_next(node);
> + leftmost = true;
> + }
> rb_erase(node, &root->rb_root);
> +
> + return leftmost;
> }
>
> static inline void rb_replace_node_cached(struct rb_node *victim,
> @@ -152,4 +158,121 @@ static inline void rb_replace_node_cache
> rb_replace_node(victim, new, &root->rb_root);
> }
>
> -#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
> +static inline bool rb_add_cached(struct rb_root_cached *tree, struct rb_node *node,
> + bool (*less)(struct rb_node *, const struct rb_node *))
> +{
> + struct rb_node **link = &tree->rb_root.rb_node;
> + struct rb_node *parent = NULL;
> + bool leftmost = true;
> +
> + while (*link) {
> + parent = *link;
> + if (less(node, parent)) {
> + link = &parent->rb_left;
> + } else {
> + link = &parent->rb_right;
> + leftmost = false;
> + }
> + }
> +
> + rb_link_node(node, parent, link);
> + rb_insert_color_cached(node, tree, leftmost);
> +
> + return leftmost;
> +}
> +
> +static inline void rb_add(struct rb_root *tree, struct rb_node *node,
> + bool (*less)(struct rb_node *, const struct rb_node *))
> +{
> + struct rb_node **link = &tree->rb_node;
> + struct rb_node *parent = NULL;
> +
> + while (*link) {
> + parent = *link;
> + if (less(node, parent))
> + link = &parent->rb_left;
> + else
> + link = &parent->rb_right;
> + }
> +
> + rb_link_node(node, parent, link);
> + rb_insert_color(node, tree);
> +}
> +
> +static inline struct rb_node *rb_find_add(struct rb_root *tree, struct rb_node *node,
> + int (*cmp)(struct rb_node *, const struct rb_node *))
> +{
> + struct rb_node **link = &tree->rb_node;
> + struct rb_node *parent = NULL;
> + int c;
> +
> + while (*link) {
> + parent = *link;
> + c = cmp(node, parent);
> +
> + if (c < 0)
> + link = &parent->rb_left;
> + else if (c > 0)
> + link = &parent->rb_right;
> + else
> + return parent;
> + }
> +
> + rb_link_node(node, parent, link);
> + rb_insert_color(node, tree);
> + return NULL;
> +}
> +
> +static inline struct rb_node *rb_find(struct rb_root *tree, const void *key,
> + int (*cmp)(const void *key, const struct rb_node *))
> +{
> + struct rb_node *node = tree->rb_node;
> +
> + while (node) {
> + int c = cmp(key, node);
> + if (c < 0) {
> + node = node->rb_left;
> + } else if (c > 0) {
> + node = node->rb_right;
> + } else
> + return node;
> + }
> +
> + return NULL;
> +}
> +
> +static inline struct rb_node *rb_find_first(struct rb_root *tree, const void *key,
> + int (*cmp)(const void *key, const struct rb_node *))
> +{
> + struct rb_node *node = tree->rb_node;
> + struct rb_node *match = NULL;
> +
> + while (node) {
> + int c = cmp(key, node);
> + if (c <= 0) {
> + if (!c)
> + match = node;
> + node = node->rb_left;
> + } else if (c > 0) {
> + node = node->rb_right;
> + }
> + }
> +
> + return match;
> +}
> +
> +static inline struct rb_node *rb_next_match(struct rb_node *node, const void *key,
> + int (*cmp)(const void *key, const struct rb_node *))
> +{
> + node = rb_next(node);
> + if (node && cmp(key, node))
> + node = NULL;
> + return node;
> +}
> +
> +#define rb_for_each(tree, node, key, cmp) \
> + for ((node) = rb_find_first((tree), (key), (cmp)); \
> + (node); (node) = rb_next_match((node), (key), (cmp)))
> +
> +
> +#endif /* _LINUX_RBTREE_H */
> --- a/tools/objtool/elf.c
> +++ b/tools/objtool/elf.c
> @@ -43,75 +43,24 @@ static void elf_hash_init(struct hlist_h
> #define elf_hash_for_each_possible(name, obj, member, key) \
> hlist_for_each_entry(obj, &name[hash_min(key, elf_hash_bits())], member)
>
> -static void rb_add(struct rb_root *tree, struct rb_node *node,
> - int (*cmp)(struct rb_node *, const struct rb_node *))
> -{
> - struct rb_node **link = &tree->rb_node;
> - struct rb_node *parent = NULL;
> -
> - while (*link) {
> - parent = *link;
> - if (cmp(node, parent) < 0)
> - link = &parent->rb_left;
> - else
> - link = &parent->rb_right;
> - }
> -
> - rb_link_node(node, parent, link);
> - rb_insert_color(node, tree);
> -}
> -
> -static struct rb_node *rb_find_first(struct rb_root *tree, const void *key,
> - int (*cmp)(const void *key, const struct rb_node *))
> -{
> - struct rb_node *node = tree->rb_node;
> - struct rb_node *match = NULL;
> -
> - while (node) {
> - int c = cmp(key, node);
> - if (c <= 0) {
> - if (!c)
> - match = node;
> - node = node->rb_left;
> - } else if (c > 0) {
> - node = node->rb_right;
> - }
> - }
> -
> - return match;
> -}
> -
> -static struct rb_node *rb_next_match(struct rb_node *node, const void *key,
> - int (*cmp)(const void *key, const struct rb_node *))
> -{
> - node = rb_next(node);
> - if (node && cmp(key, node))
> - node = NULL;
> - return node;
> -}
> -
> -#define rb_for_each(tree, node, key, cmp) \
> - for ((node) = rb_find_first((tree), (key), (cmp)); \
> - (node); (node) = rb_next_match((node), (key), (cmp)))
> -
> -static int symbol_to_offset(struct rb_node *a, const struct rb_node *b)
> +static bool symbol_to_offset(struct rb_node *a, const struct rb_node *b)
> {
> struct symbol *sa = rb_entry(a, struct symbol, node);
> struct symbol *sb = rb_entry(b, struct symbol, node);
>
> if (sa->offset < sb->offset)
> - return -1;
> + return true;
> if (sa->offset > sb->offset)
> - return 1;
> + return false;
>
> if (sa->len < sb->len)
> - return -1;
> + return true;
> if (sa->len > sb->len)
> - return 1;
> + return false;
>
> sa->alias = sb;
>
> - return 0;
> + return false;
> }
>
> static int symbol_by_offset(const void *key, const struct rb_node *node)
>
>
--
Michel "Walken" Lespinasse
A program is never fully debugged until the last user dies.
Powered by blists - more mailing lists