Date:   Tue, 12 Jul 2022 16:17:00 +0200
From:   Marco Elver <elver@...gle.com>
To:     Alexander Potapenko <glider@...gle.com>
Cc:     Alexander Viro <viro@...iv.linux.org.uk>,
        Alexei Starovoitov <ast@...nel.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Andrey Konovalov <andreyknvl@...gle.com>,
        Andy Lutomirski <luto@...nel.org>,
        Arnd Bergmann <arnd@...db.de>, Borislav Petkov <bp@...en8.de>,
        Christoph Hellwig <hch@....de>,
        Christoph Lameter <cl@...ux.com>,
        David Rientjes <rientjes@...gle.com>,
        Dmitry Vyukov <dvyukov@...gle.com>,
        Eric Dumazet <edumazet@...gle.com>,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        Herbert Xu <herbert@...dor.apana.org.au>,
        Ilya Leoshkevich <iii@...ux.ibm.com>,
        Ingo Molnar <mingo@...hat.com>, Jens Axboe <axboe@...nel.dk>,
        Joonsoo Kim <iamjoonsoo.kim@....com>,
        Kees Cook <keescook@...omium.org>,
        Mark Rutland <mark.rutland@....com>,
        Matthew Wilcox <willy@...radead.org>,
        "Michael S. Tsirkin" <mst@...hat.com>,
        Pekka Enberg <penberg@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Petr Mladek <pmladek@...e.com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Vasily Gorbik <gor@...ux.ibm.com>,
        Vegard Nossum <vegard.nossum@...cle.com>,
        Vlastimil Babka <vbabka@...e.cz>, kasan-dev@...glegroups.com,
        linux-mm@...ck.org, linux-arch@...r.kernel.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 02/45] stackdepot: reserve 5 extra bits in depot_stack_handle_t

On Fri, 1 Jul 2022 at 16:23, Alexander Potapenko <glider@...gle.com> wrote:
>
> Some users (currently only KMSAN) may want to use spare bits in
> depot_stack_handle_t. Let them do so by adding @extra_bits to
> __stack_depot_save() to store arbitrary flags, and providing
> stack_depot_get_extra_bits() to retrieve those flags.
>
> Also adapt KASAN to the new prototype by passing extra_bits=0, as KASAN
> does not intend to store additional information in the stack handle.
>
> Signed-off-by: Alexander Potapenko <glider@...gle.com>

Reviewed-by: Marco Elver <elver@...gle.com>
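
For reference, a minimal usage sketch of the new interface (hypothetical
caller, not part of this patch): a user stashes a small tag in the spare
handle bits at save time and reads it back later without fetching the
trace. The tag must fit into STACK_DEPOT_EXTRA_BITS (5) bits.

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/* Hypothetical helper: save the current stack with a small tag. */
static depot_stack_handle_t save_stack_with_tag(unsigned int tag, gfp_t flags)
{
	unsigned long entries[64];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	/* @tag is stored in the otherwise unused bits of the handle. */
	return __stack_depot_save(entries, nr_entries, tag, flags, true);
}

/* Recover the tag from the handle, without stack_depot_fetch(). */
static unsigned int stack_tag(depot_stack_handle_t handle)
{
	return stack_depot_get_extra_bits(handle);
}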


> ---
> v4:
>  -- per Marco Elver's request, fold "kasan: common: adapt to the new
>     prototype of __stack_depot_save()" into this patch to prevent
>     bisection breakages.
>
> Link: https://linux-review.googlesource.com/id/I0587f6c777667864768daf07821d594bce6d8ff9
> ---
>  include/linux/stackdepot.h |  8 ++++++++
>  lib/stackdepot.c           | 29 ++++++++++++++++++++++++-----
>  mm/kasan/common.c          |  2 +-
>  3 files changed, 33 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
> index bc2797955de90..9ca7798d7a318 100644
> --- a/include/linux/stackdepot.h
> +++ b/include/linux/stackdepot.h
> @@ -14,9 +14,15 @@
>  #include <linux/gfp.h>
>
>  typedef u32 depot_stack_handle_t;
> +/*
> + * Number of bits in the handle that stack depot doesn't use. Users may store
> + * information in them.
> + */
> +#define STACK_DEPOT_EXTRA_BITS 5
>
>  depot_stack_handle_t __stack_depot_save(unsigned long *entries,
>                                         unsigned int nr_entries,
> +                                       unsigned int extra_bits,
>                                         gfp_t gfp_flags, bool can_alloc);
>
>  /*
> @@ -59,6 +65,8 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
>  unsigned int stack_depot_fetch(depot_stack_handle_t handle,
>                                unsigned long **entries);
>
> +unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
> +
>  int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
>                        int spaces);
>
> diff --git a/lib/stackdepot.c b/lib/stackdepot.c
> index 5ca0d086ef4a3..3d1dbdd5a87f6 100644
> --- a/lib/stackdepot.c
> +++ b/lib/stackdepot.c
> @@ -42,7 +42,8 @@
>  #define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
>                                         STACK_ALLOC_ALIGN)
>  #define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
> -               STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
> +               STACK_ALLOC_NULL_PROTECTION_BITS - \
> +               STACK_ALLOC_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
>  #define STACK_ALLOC_SLABS_CAP 8192
>  #define STACK_ALLOC_MAX_SLABS \
>         (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
> @@ -55,6 +56,7 @@ union handle_parts {
>                 u32 slabindex : STACK_ALLOC_INDEX_BITS;
>                 u32 offset : STACK_ALLOC_OFFSET_BITS;
>                 u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
> +               u32 extra : STACK_DEPOT_EXTRA_BITS;
>         };
>  };
>
> @@ -76,6 +78,14 @@ static int next_slab_inited;
>  static size_t depot_offset;
>  static DEFINE_RAW_SPINLOCK(depot_lock);
>
> +unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
> +{
> +       union handle_parts parts = { .handle = handle };
> +
> +       return parts.extra;
> +}
> +EXPORT_SYMBOL(stack_depot_get_extra_bits);
> +
>  static bool init_stack_slab(void **prealloc)
>  {
>         if (!*prealloc)
> @@ -139,6 +149,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
>         stack->handle.slabindex = depot_index;
>         stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
>         stack->handle.valid = 1;
> +       stack->handle.extra = 0;
>         memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
>         depot_offset += required_size;
>
> @@ -343,6 +354,7 @@ EXPORT_SYMBOL_GPL(stack_depot_fetch);
>   *
>   * @entries:           Pointer to storage array
>   * @nr_entries:                Size of the storage array
> + * @extra_bits:                Flags to store in unused bits of depot_stack_handle_t
>   * @alloc_flags:       Allocation gfp flags
>   * @can_alloc:         Allocate stack slabs (increased chance of failure if false)
>   *
> @@ -354,6 +366,10 @@ EXPORT_SYMBOL_GPL(stack_depot_fetch);
>   * If the stack trace in @entries is from an interrupt, only the portion up to
>   * interrupt entry is saved.
>   *
> + * Additional opaque flags can be passed in @extra_bits, stored in the unused
> + * bits of the stack handle, and retrieved using stack_depot_get_extra_bits()
> + * without calling stack_depot_fetch().
> + *
>   * Context: Any context, but setting @can_alloc to %false is required if
>   *          alloc_pages() cannot be used from the current context. Currently
>   *          this is the case from contexts where neither %GFP_ATOMIC nor
> @@ -363,10 +379,11 @@ EXPORT_SYMBOL_GPL(stack_depot_fetch);
>   */
>  depot_stack_handle_t __stack_depot_save(unsigned long *entries,
>                                         unsigned int nr_entries,
> +                                       unsigned int extra_bits,
>                                         gfp_t alloc_flags, bool can_alloc)
>  {
>         struct stack_record *found = NULL, **bucket;
> -       depot_stack_handle_t retval = 0;
> +       union handle_parts retval = { .handle = 0 };
>         struct page *page = NULL;
>         void *prealloc = NULL;
>         unsigned long flags;
> @@ -450,9 +467,11 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
>                 free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
>         }
>         if (found)
> -               retval = found->handle.handle;
> +               retval.handle = found->handle.handle;
>  fast_exit:
> -       return retval;
> +       retval.extra = extra_bits;
> +
> +       return retval.handle;
>  }
>  EXPORT_SYMBOL_GPL(__stack_depot_save);
>
> @@ -472,6 +491,6 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
>                                       unsigned int nr_entries,
>                                       gfp_t alloc_flags)
>  {
> -       return __stack_depot_save(entries, nr_entries, alloc_flags, true);
> +       return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
>  }
>  EXPORT_SYMBOL_GPL(stack_depot_save);
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index c40c0e7b3b5f1..ba4fceeec173c 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -36,7 +36,7 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
>         unsigned int nr_entries;
>
>         nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
> -       return __stack_depot_save(entries, nr_entries, flags, can_alloc);
> +       return __stack_depot_save(entries, nr_entries, 0, flags, can_alloc);
>  }
>
>  void kasan_set_track(struct kasan_track *track, gfp_t flags)
> --
> 2.37.0.rc0.161.g10f37bed90-goog
>
