Message-ID: <CACT4Y+Yt6KjcvOehj6VV=0-W+mGuzh1vOd3dH9DbnPW9h04tQA@mail.gmail.com>
Date: Tue, 21 Sep 2021 12:50:04 +0200
From: Dmitry Vyukov <dvyukov@...gle.com>
To: Marco Elver <elver@...gle.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Alexander Potapenko <glider@...gle.com>,
Jann Horn <jannh@...gle.com>,
Aleksandr Nogikh <nogikh@...gle.com>,
Taras Madan <tarasmadan@...gle.com>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
kasan-dev@...glegroups.com
Subject: Re: [PATCH v2 3/5] kfence: move saving stack trace of allocations
into __kfence_alloc()
On Tue, 21 Sept 2021 at 12:10, 'Marco Elver' via kasan-dev
<kasan-dev@...glegroups.com> wrote:
>
> Move the saving of the stack trace of allocations into __kfence_alloc(),
> so that the stack entries array can be used outside of
> kfence_guarded_alloc() and we avoid potentially unwinding the stack
> multiple times.
>
> Signed-off-by: Marco Elver <elver@...gle.com>
Reviewed-by: Dmitry Vyukov <dvyukov@...gle.com>
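
The point of the refactor is that the stack is now unwound exactly once
per guarded allocation, and the resulting entries array can be shared by
every consumer. Not part of the patch, but as a minimal userspace
analogue of that pattern (illustrative names only; glibc's backtrace()
stands in for stack_trace_save()):

#include <execinfo.h> /* backtrace(), glibc */
#include <stddef.h>

#define STACK_DEPTH 64

/* Hypothetical consumers that each need the allocation's call stack. */
static void record_in_metadata(void **entries, size_t n) { (void)entries; (void)n; }
static void report_allocation(void **entries, size_t n) { (void)entries; (void)n; }

void alloc_entry_point(void)
{
        void *entries[STACK_DEPTH];
        size_t n = backtrace(entries, STACK_DEPTH); /* unwind once */

        record_in_metadata(entries, n); /* ...then reuse everywhere, */
        report_allocation(entries, n);  /* instead of unwinding again */
}
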
> ---
> v2:
> * New patch.
> ---
> mm/kfence/core.c | 35 ++++++++++++++++++++++++-----------
> 1 file changed, 24 insertions(+), 11 deletions(-)
>
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index 249d75b7e5ee..db01814f8ff0 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -187,19 +187,26 @@ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *m
> * Update the object's metadata state, including updating the alloc/free stacks
> * depending on the state transition.
> */
> -static noinline void metadata_update_state(struct kfence_metadata *meta,
> - enum kfence_object_state next)
> +static noinline void
> +metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
> + unsigned long *stack_entries, size_t num_stack_entries)
> {
> struct kfence_track *track =
> next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
>
> lockdep_assert_held(&meta->lock);
>
> - /*
> - * Skip over 1 (this) functions; noinline ensures we do not accidentally
> - * skip over the caller by never inlining.
> - */
> - track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
> + if (stack_entries) {
> + memcpy(track->stack_entries, stack_entries,
> + num_stack_entries * sizeof(stack_entries[0]));
> + } else {
> + /*
> +                * Skip over 1 (this) function; noinline ensures we do not
> + * accidentally skip over the caller by never inlining.
> + */
> + num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
> + }
> + track->num_stack_entries = num_stack_entries;
> track->pid = task_pid_nr(current);
> track->cpu = raw_smp_processor_id();
> track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
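
To see the copy-or-capture fallback above in isolation: a self-contained
userspace sketch (struct and function names are made up; backtrace()
again stands in for stack_trace_save()):

#include <execinfo.h>
#include <stddef.h>
#include <string.h>

#define STACK_DEPTH 64

struct track {
        void *stack_entries[STACK_DEPTH];
        size_t num_stack_entries;
};

static void track_update(struct track *track, void **stack_entries,
                         size_t num_stack_entries)
{
        if (stack_entries) {
                /* Caller already unwound; copy its entries (assumed to be
                 * capped at STACK_DEPTH by the capturing side). */
                memcpy(track->stack_entries, stack_entries,
                       num_stack_entries * sizeof(stack_entries[0]));
        } else {
                /* No pre-captured trace: unwind here instead. */
                num_stack_entries = backtrace(track->stack_entries, STACK_DEPTH);
        }
        track->num_stack_entries = num_stack_entries;
}
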
> @@ -261,7 +268,8 @@ static __always_inline void for_each_canary(const struct kfence_metadata *meta,
> }
> }
>
> -static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
> +static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
> + unsigned long *stack_entries, size_t num_stack_entries)
> {
> struct kfence_metadata *meta = NULL;
> unsigned long flags;
> @@ -320,7 +328,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
> addr = (void *)meta->addr;
>
> /* Update remaining metadata. */
> - metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED);
> + metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
> /* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
> WRITE_ONCE(meta->cache, cache);
> meta->size = size;
> @@ -400,7 +408,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
> memzero_explicit(addr, meta->size);
>
> /* Mark the object as freed. */
> - metadata_update_state(meta, KFENCE_OBJECT_FREED);
> + metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
>
> raw_spin_unlock_irqrestore(&meta->lock, flags);
>
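
In the sketch's terms, the free path keeps the old behaviour: passing
NULL/0 makes metadata_update_state() (track_update() above) unwind the
stack itself, so only the allocation path benefits from a pre-captured
trace:

        struct track free_track;
        track_update(&free_track, NULL, 0); /* unwinds internally */
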
> @@ -742,6 +750,9 @@ void kfence_shutdown_cache(struct kmem_cache *s)
>
> void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
> {
> + unsigned long stack_entries[KFENCE_STACK_DEPTH];
> + size_t num_stack_entries;
> +
> /*
> * Perform size check before switching kfence_allocation_gate, so that
> * we don't disable KFENCE without making an allocation.
> @@ -786,7 +797,9 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
> if (!READ_ONCE(kfence_enabled))
> return NULL;
>
> - return kfence_guarded_alloc(s, size, flags);
> + num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);
> +
> + return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries);
> }
>
> size_t kfence_ksize(const void *addr)
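
And the caller side, mirroring this hunk: the trace is captured once at
the fast-path boundary and handed down. Note the skipnr argument is 0
here, whereas the old call used 1 from inside the noinline
metadata_update_state() to drop that function's own frame; the report
code filters KFENCE-internal frames separately when printing.
Continuing the userspace sketch (stub names, not the kernel API):

static void *guarded_alloc(size_t size, void **entries, size_t n)
{
        /* Stub standing in for kfence_guarded_alloc(). */
        (void)size; (void)entries; (void)n;
        return NULL;
}

void *alloc_fast_path(size_t size)
{
        void *entries[STACK_DEPTH];
        size_t n = backtrace(entries, STACK_DEPTH); /* one unwind... */

        return guarded_alloc(size, entries, n);     /* ...passed down */
}
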
> --
> 2.33.0.464.g1972c5931b-goog
>