Message-ID: <CANpmjNMrwXxU0YCwvHo59RFDkoxA-MtdrRCSPoRW+KYG2ez-NQ@mail.gmail.com>
Date:   Tue, 19 Jul 2022 13:41:04 +0200
From:   Marco Elver <elver@...gle.com>
To:     andrey.konovalov@...ux.dev
Cc:     Alexander Potapenko <glider@...gle.com>,
        Andrey Konovalov <andreyknvl@...il.com>,
        Dmitry Vyukov <dvyukov@...gle.com>,
        Andrey Ryabinin <ryabinin.a.a@...il.com>,
        kasan-dev@...glegroups.com, Peter Collingbourne <pcc@...gle.com>,
        Evgenii Stepanov <eugenis@...gle.com>,
        Florian Mayer <fmayer@...gle.com>,
        Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org,
        Andrey Konovalov <andreyknvl@...gle.com>
Subject: Re: [PATCH mm v2 30/33] kasan: implement stack ring for tag-based modes

On Tue, 19 Jul 2022 at 02:15, <andrey.konovalov@...ux.dev> wrote:
>
> From: Andrey Konovalov <andreyknvl@...gle.com>
>
> For the tag-based KASAN modes, implement storing stack depot handles
> for the alloc/free stack traces of slab objects in a ring buffer.
>
> This ring buffer is referred to as the stack ring.
>
> On each alloc/free of a slab object, the tagged address of the object and
> the current stack trace are recorded in the stack ring.
>
> On each bug report, if the accessed address belongs to a slab object, the
> stack ring is scanned for matching entries. The newest entries are used to
> print the alloc/free stack traces in the report: one entry for alloc and
> one for free.
>
> The number of entries in the stack ring is fixed in this patch, but one of
> the following patches adds a command-line argument to control it.
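
(Aside for readers following along: the scheme described above boils
down to something like the following userspace model. This is a
simplified, illustrative sketch with invented names, not the kernel
code: no locking and no busy-slot handling.)

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 8			/* the patch uses 32 << 10 */

struct entry {
	void *_Atomic ptr;		/* tagged object address */
	uint32_t stack;			/* stand-in for a depot handle */
};

static struct entry ring[RING_SIZE];
static _Atomic uint64_t ring_pos;

/* On each alloc/free: claim the next slot, fill it, publish ptr last. */
static void record(void *ptr, uint32_t stack)
{
	uint64_t pos = atomic_fetch_add(&ring_pos, 1);
	struct entry *e = &ring[pos % RING_SIZE];

	e->stack = stack;
	atomic_store_explicit(&e->ptr, ptr, memory_order_release);
}

/* On a report: scan backwards so the newest matching entry wins. */
static uint32_t find_newest(void *ptr)
{
	uint64_t pos = atomic_load(&ring_pos);

	for (uint64_t i = pos - 1; i != pos - 1 - RING_SIZE; i--) {
		struct entry *e = &ring[i % RING_SIZE];

		if (atomic_load_explicit(&e->ptr,
					 memory_order_acquire) == ptr)
			return e->stack;
	}
	return 0;	/* entry overwritten or never recorded */
}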
>
> Signed-off-by: Andrey Konovalov <andreyknvl@...gle.com>
>
> ---
>
> Changes v1->v2:
> - Only use the atomic type for pos, use READ/WRITE_ONCE() for the rest.
> - Rename KASAN_STACK_RING_ENTRIES to KASAN_STACK_RING_SIZE.
> - Rename object local variable in kasan_complete_mode_report_info() to
>   ptr to match the name in kasan_stack_ring_entry.
> - Detect stack ring entry slots that are being written to.
> - Use read-write lock to disallow reading half-written stack ring entries.
> - Add a comment about the stack ring being best-effort.
> ---
>  mm/kasan/kasan.h       | 21 ++++++++++++
>  mm/kasan/report_tags.c | 76 ++++++++++++++++++++++++++++++++++++++++++
>  mm/kasan/tags.c        | 50 +++++++++++++++++++++++++++
>  3 files changed, 147 insertions(+)
>
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index 7df107dc400a..cfff81139d67 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -2,6 +2,7 @@
>  #ifndef __MM_KASAN_KASAN_H
>  #define __MM_KASAN_KASAN_H
>
> +#include <linux/atomic.h>
>  #include <linux/kasan.h>
>  #include <linux/kasan-tags.h>
>  #include <linux/kfence.h>
> @@ -233,6 +234,26 @@ struct kasan_free_meta {
>
>  #endif /* CONFIG_KASAN_GENERIC */
>
> +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
> +
> +struct kasan_stack_ring_entry {
> +       void *ptr;
> +       size_t size;
> +       u32 pid;
> +       depot_stack_handle_t stack;
> +       bool is_free;
> +};
> +
> +#define KASAN_STACK_RING_SIZE (32 << 10)
> +
> +struct kasan_stack_ring {
> +       rwlock_t lock;
> +       atomic64_t pos;
> +       struct kasan_stack_ring_entry entries[KASAN_STACK_RING_SIZE];
> +};
> +
> +#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
> +
>  #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
>  /* Used in KUnit-compatible KASAN tests. */
>  struct kunit_kasan_status {
> diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
> index 5cbac2cdb177..a996489e6dac 100644
> --- a/mm/kasan/report_tags.c
> +++ b/mm/kasan/report_tags.c
> @@ -4,8 +4,12 @@
>   * Copyright (c) 2020 Google, Inc.
>   */
>
> +#include <linux/atomic.h>
> +
>  #include "kasan.h"
>
> +extern struct kasan_stack_ring stack_ring;
> +
>  static const char *get_bug_type(struct kasan_report_info *info)
>  {
>         /*
> @@ -24,5 +28,77 @@ static const char *get_bug_type(struct kasan_report_info *info)
>
>  void kasan_complete_mode_report_info(struct kasan_report_info *info)
>  {
> +       unsigned long flags;
> +       u64 pos;
> +       struct kasan_stack_ring_entry *entry;
> +       void *ptr;
> +       u32 pid;
> +       depot_stack_handle_t stack;
> +       bool is_free;
> +       bool alloc_found = false, free_found = false;
> +
>         info->bug_type = get_bug_type(info);
> +
> +       if (!info->cache || !info->object)
> +               return;
> +
> +       write_lock_irqsave(&stack_ring.lock, flags);
> +
> +       pos = atomic64_read(&stack_ring.pos);
> +
> +       /*
> +        * The loop below tries to find stack ring entries relevant to the
> +        * buggy object. This is a best-effort process.
> +        *
> +        * First, another object with the same tag can be allocated in place of
> +        * the buggy object. Also, since the number of entries is limited, the
> +        * entries relevant to the buggy object can be overwritten.
> +        */
> +
> +       for (u64 i = pos - 1; i != pos - 1 - KASAN_STACK_RING_SIZE; i--) {
> +               if (alloc_found && free_found)
> +                       break;
> +
> +               entry = &stack_ring.entries[i % KASAN_STACK_RING_SIZE];
> +
> +               /* Paired with smp_store_release() in save_stack_info(). */
> +               ptr = (void *)smp_load_acquire(&entry->ptr);
> +
> +               if (kasan_reset_tag(ptr) != info->object ||
> +                   get_tag(ptr) != get_tag(info->access_addr))
> +                       continue;
> +
> +               pid = READ_ONCE(entry->pid);
> +               stack = READ_ONCE(entry->stack);
> +               is_free = READ_ONCE(entry->is_free);
> +
> +               /* Try detecting if the entry was changed while being read. */
> +               smp_mb();
> +               if (ptr != (void *)READ_ONCE(entry->ptr))
> +                       continue;

I thought the re-validation was no longer needed because of the rwlock
protection?
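
To spell out the pattern I mean: the usual rwlock roles are inverted
here. The many concurrent recorders take the lock for reading so they
do not serialize against each other, and the single reporter takes it
for writing, so acquiring the write lock drains every in-flight
recorder. As a sketch (invented function names, not the patch):

static DEFINE_RWLOCK(ring_lock);

/* Hot path; any number of CPUs concurrently. */
static void recorder(void)
{
	unsigned long flags;

	read_lock_irqsave(&ring_lock, flags);
	/* ... claim a slot and fill the entry ... */
	read_unlock_irqrestore(&ring_lock, flags);
}

/* Rare path; excludes all recorders while held. */
static void reporter(void)
{
	unsigned long flags;

	write_lock_irqsave(&ring_lock, flags);
	/*
	 * No recorder is inside its critical section at this point, so
	 * every entry published before here is fully written.
	 */
	write_unlock_irqrestore(&ring_lock, flags);
}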

The rest looks fine now.

> +               if (is_free) {
> +                       /*
> +                        * Second free of the same object.
> +                        * Give up on trying to find the alloc entry.
> +                        */
> +                       if (free_found)
> +                               break;
> +
> +                       info->free_track.pid = pid;
> +                       info->free_track.stack = stack;
> +                       free_found = true;
> +               } else {
> +                       /* Second alloc of the same object. Give up. */
> +                       if (alloc_found)
> +                               break;
> +
> +                       info->alloc_track.pid = pid;
> +                       info->alloc_track.stack = stack;
> +                       alloc_found = true;
> +               }
> +       }
> +
> +       write_unlock_irqrestore(&stack_ring.lock, flags);
>  }
> diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
> index 39a0481e5228..07828021c1f5 100644
> --- a/mm/kasan/tags.c
> +++ b/mm/kasan/tags.c
> @@ -6,6 +6,7 @@
>   * Copyright (c) 2020 Google, Inc.
>   */
>
> +#include <linux/atomic.h>
>  #include <linux/init.h>
>  #include <linux/kasan.h>
>  #include <linux/kernel.h>
> @@ -16,11 +17,60 @@
>  #include <linux/types.h>
>
>  #include "kasan.h"
> +#include "../slab.h"
> +
> +/* Non-zero, as initial pointer values are 0. */
> +#define STACK_RING_BUSY_PTR ((void *)1)
> +
> +struct kasan_stack_ring stack_ring;
> +
> +static void save_stack_info(struct kmem_cache *cache, void *object,
> +                       gfp_t gfp_flags, bool is_free)
> +{
> +       unsigned long flags;
> +       depot_stack_handle_t stack;
> +       u64 pos;
> +       struct kasan_stack_ring_entry *entry;
> +       void *old_ptr;
> +
> +       stack = kasan_save_stack(gfp_flags, true);
> +
> +       /*
> +        * Prevent save_stack_info() from modifying stack ring
> +        * when kasan_complete_mode_report_info() is walking it.
> +        */
> +       read_lock_irqsave(&stack_ring.lock, flags);
> +
> +next:
> +       pos = atomic64_fetch_add(1, &stack_ring.pos);
> +       entry = &stack_ring.entries[pos % KASAN_STACK_RING_SIZE];
> +
> +       /* Detect stack ring entry slots that are being written to. */
> +       old_ptr = READ_ONCE(entry->ptr);
> +       if (old_ptr == STACK_RING_BUSY_PTR)
> +               goto next; /* Busy slot. */
> +       if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR))
> +               goto next; /* Busy slot. */
> +
> +       WRITE_ONCE(entry->size, cache->object_size);
> +       WRITE_ONCE(entry->pid, current->pid);
> +       WRITE_ONCE(entry->stack, stack);
> +       WRITE_ONCE(entry->is_free, is_free);
> +
> +       /*
> +        * Paired with smp_load_acquire() in kasan_complete_mode_report_info().
> +        */
> +       smp_store_release(&entry->ptr, (void *)object);
> +
> +       read_unlock_irqrestore(&stack_ring.lock, flags);
> +}
>
>  void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
>  {
> +       save_stack_info(cache, object, flags, false);
>  }
>
>  void kasan_save_free_info(struct kmem_cache *cache, void *object)
>  {
> +       save_stack_info(cache, object, GFP_NOWAIT, true);
>  }
> --
> 2.25.1
>
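One more side note on the scan loop, since the index arithmetic is easy
to misread: i is a u64 that may wrap past zero, and the walk still
visits exactly the last KASAN_STACK_RING_SIZE positions because the
ring size divides 2^64 (32 << 10 is a power of two), which keeps
unsigned wraparound consistent with the % KASAN_STACK_RING_SIZE
mapping. Since a later patch makes the size configurable, something
like the following compile-time guard (hypothetical, not part of this
patch) would keep that invariant explicit for the fixed-size case:

/* Hypothetical guard; the scan relies on a power-of-two ring size. */
static_assert((KASAN_STACK_RING_SIZE & (KASAN_STACK_RING_SIZE - 1)) == 0,
	      "KASAN stack ring size must be a power of two");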
