Date:   Tue, 24 Mar 2020 17:23:30 +0100
From:   Marco Elver <elver@...gle.com>
To:     Will Deacon <will@...nel.org>
Cc:     LKML <linux-kernel@...r.kernel.org>,
        Eric Dumazet <edumazet@...gle.com>,
        Jann Horn <jannh@...gle.com>,
        Kees Cook <keescook@...omium.org>,
        Maddie Stone <maddiestone@...gle.com>,
        "Paul E . McKenney" <paulmck@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Thomas Gleixner <tglx@...utronix.de>, kernel-team@...roid.com,
        kernel-hardening@...ts.openwall.com
Subject: Re: [RFC PATCH 03/21] list: Annotate lockless list primitives with data_race()

On Tue, 24 Mar 2020 at 16:37, Will Deacon <will@...nel.org> wrote:
>
> Some list predicates can be used locklessly even with the non-RCU list
> implementations, since they effectively boil down to a test against
> NULL. For example, checking whether or not a list is empty is safe even
> in the presence of a concurrent, tearing write to the list head pointer.
> Similarly, checking whether or not an hlist node has been hashed is safe
> as well.
>
> Annotate these lockless list predicates with data_race() and READ_ONCE()
> so that KCSAN and the compiler are aware of what's going on. The writer
> side can then avoid having to use WRITE_ONCE() in the non-RCU
> implementation.
>
> Cc: Paul E. McKenney <paulmck@...nel.org>
> Cc: Peter Zijlstra <peterz@...radead.org>
> Cc: Marco Elver <elver@...gle.com>
> Signed-off-by: Will Deacon <will@...nel.org>
> ---
>  include/linux/list.h       | 10 +++++-----
>  include/linux/list_bl.h    |  5 +++--
>  include/linux/list_nulls.h |  6 +++---
>  include/linux/llist.h      |  2 +-
>  4 files changed, 12 insertions(+), 11 deletions(-)
>
> diff --git a/include/linux/list.h b/include/linux/list.h
> index 4fed5a0f9b77..4d9f5f9ed1a8 100644
> --- a/include/linux/list.h
> +++ b/include/linux/list.h
> @@ -279,7 +279,7 @@ static inline int list_is_last(const struct list_head *list,
>   */
>  static inline int list_empty(const struct list_head *head)
>  {
> -       return READ_ONCE(head->next) == head;
> +       return data_race(READ_ONCE(head->next) == head);

Double-marking should never be necessary, at least if you want to make
KCSAN happy. From what I gather there is an unmarked write somewhere,
correct? In that case, KCSAN will still complain: if it sees a race
between this read and that write, one of the two accesses is still
plain (the write).

Then, my suggestion would be to mark the write with data_race() and
just leave this as a READ_ONCE(). Having a data_race() somewhere only
makes KCSAN stop reporting the race if the paired access is also
marked (be it with data_race() or _ONCE, etc.).
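
To illustrate, the pairing would look roughly like this (only a
sketch: my_list_head_reset() is a made-up writer for illustration,
not an existing list.h helper; READ_ONCE() and data_race() come from
<linux/compiler.h>):

        /* Reader: READ_ONCE() alone is enough on this side. */
        static inline int my_list_empty(const struct list_head *head)
        {
                return READ_ONCE(head->next) == head;
        }

        /*
         * Writer: keep the plain store, but wrap it in data_race() so
         * that KCSAN treats the race with the marked reader above as
         * intentional.
         */
        static inline void my_list_head_reset(struct list_head *head)
        {
                data_race(head->next = head);
        }

With both accesses marked like this, KCSAN no longer reports the pair.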

Alternatively, if marking the write is impossible, you can surround
the access with kcsan_disable_current()/kcsan_enable_current(). Or, as
a last resort, just leaving it as-is is fine too, because KCSAN's default
config (still) has KCSAN_ASSUME_PLAIN_WRITES_ATOMIC selected.
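
For the disable/enable variant, something like this would do (again
only a sketch, reusing the made-up my_list_empty() from above):

        static inline int my_list_empty(const struct list_head *head)
        {
                int empty;

                /* Suppress KCSAN checking for the access below. */
                kcsan_disable_current();
                empty = READ_ONCE(head->next) == head;
                kcsan_enable_current();

                return empty;
        }

This simply makes KCSAN ignore the access in between, rather than
pairing it with a marked write.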

Thanks,
-- Marco

>  }
>
>  /**
> @@ -524,7 +524,7 @@ static inline void list_splice_tail_init(struct list_head *list,
>   */
>  #define list_first_entry_or_null(ptr, type, member) ({ \
>         struct list_head *head__ = (ptr); \
> -       struct list_head *pos__ = READ_ONCE(head__->next); \
> +       struct list_head *pos__ = data_race(READ_ONCE(head__->next)); \
>         pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
>  })
>
> @@ -772,13 +772,13 @@ static inline void INIT_HLIST_NODE(struct hlist_node *h)
>   * hlist_unhashed - Has node been removed from list and reinitialized?
>   * @h: Node to be checked
>   *
> - * Not that not all removal functions will leave a node in unhashed
> + * Note that not all removal functions will leave a node in unhashed
>   * state.  For example, hlist_nulls_del_init_rcu() does leave the
>   * node in unhashed state, but hlist_nulls_del() does not.
>   */
>  static inline int hlist_unhashed(const struct hlist_node *h)
>  {
> -       return !READ_ONCE(h->pprev);
> +       return data_race(!READ_ONCE(h->pprev));
>  }
>
>  /**
> @@ -787,7 +787,7 @@ static inline int hlist_unhashed(const struct hlist_node *h)
>   */
>  static inline int hlist_empty(const struct hlist_head *h)
>  {
> -       return !READ_ONCE(h->first);
> +       return data_race(!READ_ONCE(h->first));
>  }
>
>  static inline void __hlist_del(struct hlist_node *n)
> diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
> index ae1b541446c9..1804fdb84dda 100644
> --- a/include/linux/list_bl.h
> +++ b/include/linux/list_bl.h
> @@ -51,7 +51,7 @@ static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
>
>  static inline bool  hlist_bl_unhashed(const struct hlist_bl_node *h)
>  {
> -       return !h->pprev;
> +       return data_race(!READ_ONCE(h->pprev));
>  }
>
>  static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
> @@ -71,7 +71,8 @@ static inline void hlist_bl_set_first(struct hlist_bl_head *h,
>
>  static inline bool hlist_bl_empty(const struct hlist_bl_head *h)
>  {
> -       return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK);
> +       unsigned long first = data_race((unsigned long)READ_ONCE(h->first));
> +       return !(first & ~LIST_BL_LOCKMASK);
>  }
>
>  static inline void hlist_bl_add_head(struct hlist_bl_node *n,
> diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
> index 3a9ff01e9a11..fa51a801bf32 100644
> --- a/include/linux/list_nulls.h
> +++ b/include/linux/list_nulls.h
> @@ -60,18 +60,18 @@ static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
>   * hlist_nulls_unhashed - Has node been removed and reinitialized?
>   * @h: Node to be checked
>   *
> - * Not that not all removal functions will leave a node in unhashed state.
> + * Note that not all removal functions will leave a node in unhashed state.
>   * For example, hlist_del_init_rcu() leaves the node in unhashed state,
>   * but hlist_nulls_del() does not.
>   */
>  static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
>  {
> -       return !READ_ONCE(h->pprev);
> +       return data_race(!READ_ONCE(h->pprev));
>  }
>
>  static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
>  {
> -       return is_a_nulls(READ_ONCE(h->first));
> +       return data_race(is_a_nulls(READ_ONCE(h->first)));
>  }
>
>  static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
> diff --git a/include/linux/llist.h b/include/linux/llist.h
> index 2e9c7215882b..c7f042b73899 100644
> --- a/include/linux/llist.h
> +++ b/include/linux/llist.h
> @@ -186,7 +186,7 @@ static inline void init_llist_head(struct llist_head *list)
>   */
>  static inline bool llist_empty(const struct llist_head *head)
>  {
> -       return READ_ONCE(head->first) == NULL;
> +       return data_race(READ_ONCE(head->first) == NULL);
>  }
>
>  static inline struct llist_node *llist_next(struct llist_node *node)
> --
> 2.20.1
>
