Message-ID: <be221a24-fec2-4ae4-9020-c684902870e7@suse.cz>
Date: Wed, 3 Jul 2024 12:47:35 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Suren Baghdasaryan <surenb@...gle.com>, akpm@...ux-foundation.org
Cc: kent.overstreet@...ux.dev, cl@...ux.com, penberg@...nel.org,
rientjes@...gle.com, iamjoonsoo.kim@....com, roman.gushchin@...ux.dev,
linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/1] mm, slab: move allocation tagging code in the alloc
path into a hook
On 7/3/24 3:53 AM, Suren Baghdasaryan wrote:
> Move allocation tagging specific code in the allocation path into
> alloc_tagging_slab_alloc_hook, similar to how freeing path uses
> alloc_tagging_slab_free_hook. No functional changes, just code
> cleanup.
>
> Suggested-by: Vlastimil Babka <vbabka@...e.cz>
> Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
> ---
> mm/slub.c | 34 +++++++++++++++++++++++++++++-----
> 1 file changed, 29 insertions(+), 5 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index 4927edec6a8c..99d53190cfcf 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2033,11 +2033,18 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> return slab_obj_exts(slab) + obj_to_index(s, slab, p);
> }
>
> +#ifdef CONFIG_MEM_ALLOC_PROFILING
I think if you extract this whole #ifdef CONFIG_MEM_ALLOC_PROFILING section
so that it sits outside of the CONFIG_SLAB_OBJ_EXT sections, i.e. below the
final #endif /* CONFIG_SLAB_OBJ_EXT */, then it wouldn't be necessary to
have two instances of the empty hooks?
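Roughly what I have in mind, as a sketch (not compile-tested; this assumes
CONFIG_MEM_ALLOC_PROFILING already implies CONFIG_SLAB_OBJ_EXT via Kconfig,
which I believe it does):

#ifdef CONFIG_SLAB_OBJ_EXT
/* alloc_slab_obj_exts(), prepare_slab_obj_exts_hook(), ... */
#else /* CONFIG_SLAB_OBJ_EXT */
/* only the empty obj_exts helpers, no tagging hooks here */
#endif /* CONFIG_SLAB_OBJ_EXT */

#ifdef CONFIG_MEM_ALLOC_PROFILING

static inline void
alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
{
	alloc_tag_add(&obj_exts->ref, current->alloc_tag, size);
}

/* alloc_tagging_slab_free_hook() would live here as well */

#else /* CONFIG_MEM_ALLOC_PROFILING */

/* the single set of empty hooks */
static inline void
alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
{
}

static inline void
alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
			     int objects)
{
}

#endif /* CONFIG_MEM_ALLOC_PROFILING */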
> +
> +static inline void
> +alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
> +{
> + alloc_tag_add(&obj_exts->ref, current->alloc_tag, size);
> +}
> +
> static inline void
> alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
> int objects)
> {
> -#ifdef CONFIG_MEM_ALLOC_PROFILING
> struct slabobj_ext *obj_exts;
> int i;
>
> @@ -2053,9 +2060,23 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
>
> alloc_tag_sub(&obj_exts[off].ref, s->size);
> }
> -#endif
> }
>
> +#else /* CONFIG_MEM_ALLOC_PROFILING */
> +
> +static inline void
> +alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
> +{
> +}
> +
> +static inline void
> +alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
> + int objects)
> +{
> +}
> +
> +#endif /* CONFIG_MEM_ALLOC_PROFILING */
> +
> #else /* CONFIG_SLAB_OBJ_EXT */
>
> static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> @@ -2079,6 +2100,11 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> return NULL;
> }
>
> +static inline void
> +alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
> +{
> +}
> +
> static inline void
> alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
> int objects)
> @@ -3944,7 +3970,6 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
> kmemleak_alloc_recursive(p[i], s->object_size, 1,
> s->flags, init_flags);
> kmsan_slab_alloc(s, p[i], init_flags);
> -#ifdef CONFIG_MEM_ALLOC_PROFILING
> if (need_slab_obj_ext()) {
> struct slabobj_ext *obj_exts;
>
> @@ -3955,9 +3980,8 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
> * check should be added before alloc_tag_add().
> */
> if (likely(obj_exts))
> - alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
> + alloc_tagging_slab_alloc_hook(obj_exts, s->size);
> }
Could this whole "if (need_slab_obj_ext())" block be part of
alloc_tagging_slab_alloc_hook()? That would match
__memcg_slab_post_alloc_hook(), which also takes care of calling
alloc_slab_obj_exts() on its own. Maybe then we won't even need the empty
versions of prepare_slab_obj_exts_hook().
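E.g. something along these lines (an untested sketch; I'm guessing at the
signature, with the hook taking the cache, the gfp flags and the object
instead of the obj_exts pointer):

static inline void
alloc_tagging_slab_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
{
	if (need_slab_obj_ext()) {
		struct slabobj_ext *obj_exts;

		obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
		/*
		 * Currently obj_exts is used only for allocation profiling.
		 * If other users appear then mem_alloc_profiling_enabled()
		 * check should be added before alloc_tag_add().
		 */
		if (likely(obj_exts))
			alloc_tag_add(&obj_exts->ref, current->alloc_tag,
				      s->size);
	}
}

slab_post_alloc_hook() would then just call
alloc_tagging_slab_alloc_hook(s, flags, p[i]); and the
!CONFIG_MEM_ALLOC_PROFILING variant becomes an empty stub.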
> -#endif
> }
>
> return memcg_slab_post_alloc_hook(s, lru, flags, size, p);
>
> base-commit: e9d22f7a6655941fc8b2b942ed354ec780936b3e