Message-ID: <2772cf56-4183-857f-d070-c54bceb5c8d9@suse.cz>
Date: Tue, 31 Aug 2021 14:06:59 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Imran Khan <imran.f.khan@...cle.com>, cl@...ux.com,
akpm@...ux-foundation.org
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
David Rientjes <rientjes@...gle.com>,
Pekka Enberg <penberg@...nel.org>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Oliver Glitta <glittao@...il.com>
Subject: Re: [RFC PATCH 2/2] mm, slub: Use stackdepot to store user
information for slub object.
On 8/31/21 08:25, Imran Khan wrote:
> SLAB_STORE_USER causes information about the allocating and freeing
> context of a slub object to be stored in the metadata area, in a pair
> of struct track objects. These objects store the allocation and/or
> freeing stack trace in an array, so the same stack trace may end up
> stored in the metadata area of multiple objects.
> STACKDEPOT can store unique stack traces without any duplication, so
> use STACKDEPOT to store the allocation and/or freeing stack traces as
> well.
> This results in a lower memory footprint, as we no longer store
> multiple copies of the same stack trace for an allocation or free.
>
> Signed-off-by: Imran Khan <imran.f.khan@...cle.com>
> ---
> mm/slub.c | 87 ++++++++++++++++++++++++++++++-------------------------
> 1 file changed, 48 insertions(+), 39 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index df1ac8aff86f..8e2a2b837106 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -264,8 +264,8 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
> #define TRACK_ADDRS_COUNT 16
> struct track {
> unsigned long addr; /* Called from address */
> -#ifdef CONFIG_STACKTRACE
> - unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
> +#ifdef CONFIG_STACKDEPOT
> + depot_stack_handle_t stack;
> #endif
> int cpu; /* Was running on cpu */
> int pid; /* Pid context */
> @@ -668,6 +668,27 @@ static inline unsigned int get_info_end(struct kmem_cache *s)
> return s->inuse;
> }
>
> +#ifdef CONFIG_STACKDEPOT
> +static depot_stack_handle_t slub_debug_save_stack(gfp_t flags)
> +{
> + unsigned long entries[TRACK_ADDRS_COUNT];
> + unsigned int nr_entries;
> +
> + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 4);
> + nr_entries = filter_irq_stacks(entries, nr_entries);
> + return stack_depot_save(entries, nr_entries, flags);
> +}
> +
> +static void print_stack(depot_stack_handle_t stack)
> +{
> + unsigned long *entries;
> + unsigned int nr_entries;
> +
> + nr_entries = stack_depot_fetch(stack, &entries);
> + stack_trace_print(entries, nr_entries, 0);
> +}
This function could become part of stackdepot itself?
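Something like this, perhaps (untested sketch, the name is made up):

	/* in lib/stackdepot.c */
	void stack_depot_print(depot_stack_handle_t handle)
	{
		unsigned long *entries;
		unsigned int nr_entries;

		nr_entries = stack_depot_fetch(handle, &entries);
		stack_trace_print(entries, nr_entries, 0);
	}
	EXPORT_SYMBOL_GPL(stack_depot_print);

Then SLUB (and e.g. page_owner, which open-codes the same
fetch-then-print pattern) wouldn't each need their own copy.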
> +#endif
> +
> static struct track *get_track(struct kmem_cache *s, void *object,
> enum track_item alloc)
> {
> @@ -679,21 +700,13 @@ static struct track *get_track(struct kmem_cache *s, void *object,
> }
>
> static void set_track(struct kmem_cache *s, void *object,
> - enum track_item alloc, unsigned long addr)
> + enum track_item alloc, unsigned long addr, gfp_t flags)
> {
> struct track *p = get_track(s, object, alloc);
>
> if (addr) {
> -#ifdef CONFIG_STACKTRACE
> - unsigned int nr_entries;
> -
> - metadata_access_enable();
> - nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
> - TRACK_ADDRS_COUNT, 3);
> - metadata_access_disable();
> -
> - if (nr_entries < TRACK_ADDRS_COUNT)
> - p->addrs[nr_entries] = 0;
> +#ifdef CONFIG_STACKDEPOT
> + p->stack = slub_debug_save_stack(flags);
> #endif
> p->addr = addr;
> p->cpu = smp_processor_id();
> @@ -709,10 +722,11 @@ static void init_tracking(struct kmem_cache *s, void *object)
> if (!(s->flags & SLAB_STORE_USER))
> return;
>
> - set_track(s, object, TRACK_FREE, 0UL);
> - set_track(s, object, TRACK_ALLOC, 0UL);
> + set_track(s, object, TRACK_FREE, 0UL, 0U);
> + set_track(s, object, TRACK_ALLOC, 0UL, 0U);
> }
>
> +
> static void print_track(const char *s, struct track *t, unsigned long pr_time)
> {
> if (!t->addr)
> @@ -720,15 +734,11 @@ static void print_track(const char *s, struct track *t, unsigned long pr_time)
>
> pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
> s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
> -#ifdef CONFIG_STACKTRACE
> - {
> - int i;
> - for (i = 0; i < TRACK_ADDRS_COUNT; i++)
> - if (t->addrs[i])
> - pr_err("\t%pS\n", (void *)t->addrs[i]);
> - else
> - break;
> - }
> +#ifdef CONFIG_STACKDEPOT
> + if (t->stack)
> + print_stack(t->stack);
> + else
> + pr_err("(stack is not available)\n");
> #endif
> }
>
> @@ -1261,7 +1271,8 @@ static inline int alloc_consistency_checks(struct kmem_cache *s,
>
> static noinline int alloc_debug_processing(struct kmem_cache *s,
> struct page *page,
> - void *object, unsigned long addr)
> + void *object, unsigned long addr,
> + gfp_t flags)
> {
> if (s->flags & SLAB_CONSISTENCY_CHECKS) {
> if (!alloc_consistency_checks(s, page, object))
> @@ -1270,7 +1281,7 @@ static noinline int alloc_debug_processing(struct kmem_cache *s,
>
> /* Success perform special debug activities for allocs */
> if (s->flags & SLAB_STORE_USER)
> - set_track(s, object, TRACK_ALLOC, addr);
> + set_track(s, object, TRACK_ALLOC, addr, flags);
> trace(s, page, object, 1);
> init_object(s, object, SLUB_RED_ACTIVE);
> return 1;
> @@ -1350,7 +1361,7 @@ static noinline int free_debug_processing(
> }
>
> if (s->flags & SLAB_STORE_USER)
> - set_track(s, object, TRACK_FREE, addr);
> + set_track(s, object, TRACK_FREE, addr, GFP_NOWAIT);
> trace(s, page, object, 0);
> /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
> init_object(s, object, SLUB_RED_INACTIVE);
> @@ -2987,7 +2998,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
> check_new_page:
>
> if (kmem_cache_debug(s)) {
> - if (!alloc_debug_processing(s, page, freelist, addr)) {
> + if (!alloc_debug_processing(s, page, freelist, addr, gfpflags)) {
> /* Slab failed checks. Next slab needed */
> goto new_slab;
> } else {
> @@ -4275,6 +4286,8 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
> void *objp0;
> struct kmem_cache *s = page->slab_cache;
> struct track __maybe_unused *trackp;
> + unsigned long __maybe_unused *entries;
> + unsigned int __maybe_unused nr_entries;
>
> kpp->kp_ptr = object;
> kpp->kp_page = page;
> @@ -4297,19 +4310,15 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
> objp = fixup_red_left(s, objp);
> trackp = get_track(s, objp, TRACK_ALLOC);
> kpp->kp_ret = (void *)trackp->addr;
> -#ifdef CONFIG_STACKTRACE
> - for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
> - kpp->kp_stack[i] = (void *)trackp->addrs[i];
> - if (!kpp->kp_stack[i])
> - break;
> - }
> +#ifdef CONFIG_STACKDEPOT
> + nr_entries = stack_depot_fetch(trackp->stack, &entries);
> + for (i = 0; i < nr_entries; i++)
> + kpp->kp_stack[i] = (void *)entries[i];
Hmm, in case stack_depot_save() fails and returns a zero handle (e.g. due to
-ENOMEM), this seems to rely on stack_depot_fetch() returning gracefully with
zero nr_entries for a zero handle. But I don't see such a guarantee:
stack_depot_init() doesn't create a dummy entry for handle zero, and
stack_depot_fetch() doesn't check for it either. So will it work
accidentally, or return garbage? IMHO it would be useful to add such a
guarantee to stackdepot one way or another.
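Untested sketch of the kind of guard I mean, at the top of
stack_depot_fetch() in lib/stackdepot.c:

	unsigned int stack_depot_fetch(depot_stack_handle_t handle,
				       unsigned long **entries)
	{
		*entries = NULL;
		/* a zero handle means "no trace stored", not a valid record */
		if (!handle)
			return 0;
		/* ... existing record lookup continues here ... */
	}

With that, a zero handle from a failed stack_depot_save() would be safe
to pass both here and in print_stack() above.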
> trackp = get_track(s, objp, TRACK_FREE);
> - for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
> - kpp->kp_free_stack[i] = (void *)trackp->addrs[i];
> - if (!kpp->kp_free_stack[i])
> - break;
> - }
> + nr_entries = stack_depot_fetch(trackp->stack, &entries);
> + for (i = 0; i < nr_entries; i++)
> + kpp->kp_free_stack[i] = (void *)entries[i];
> #endif
> #endif
> }
>