Message-ID: <mw52yqm7wfe5afb5ybvfkpdgo4pm4wobmzv3um2cov3amekzmi@ye5drijf7njj>
Date: Wed, 23 Jul 2025 07:31:38 -0400
From: Kent Overstreet <kent.overstreet@...ux.dev>
To: Zhenhua Huang <quic_zhenhuah@...cinc.com>
Cc: rientjes@...gle.com, vbabka@...e.cz, cl@...two.org,
roman.gushchin@...ux.dev, harry.yoo@...cle.com, surenb@...gle.com,
pasha.tatashin@...een.com, akpm@...ux-foundation.org, corbet@....net, linux-mm@...ck.org,
linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org, quic_tingweiz@...cinc.com
Subject: Re: [PATCH 1/1] mm: slub: Introduce a knob to control tracking of
 slub objects
On Wed, Jul 23, 2025 at 04:03:28PM +0800, Zhenhua Huang wrote:
> The memory profiling feature tracks allocations at both the page level
> ("alloc_slab_page") and the slub object level. Tracking at the object
> level requires a slabobj_ext, which consumes 16 bytes per object when
> CONFIG_MEMCG is set.
> Based on the data I've collected, this overhead accounts for
> approximately 5.7% of slub memory usage, a considerable cost.
> w/  noslub, slub_debug=-:  Slab:  87520 kB
> w/o noslub, slub_debug=-:  Slab:  92812 kB
>
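As a sanity check on that number (my arithmetic, not from the patch):
92812 kB - 87520 kB = 5292 kB saved, and 5292 / 92812 is roughly 5.7% of
total Slab. At 16 bytes per object that's about 5292 kB / 16 B ~= 340k
tracked objects, which seems plausible.
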
> In some scenarios we may choose not to delve into SLUB allocation
> details, e.g. when initial triage indicates that SLUB memory usage is
> within acceptable limits. To support this, introduce a control knob to
> enable or disable SLUB object tracking.
> The "noslub" knob disables SLUB tracking, preventing further allocation
> of slabobj_ext structures.
...Have there been actual scenarios where this would be useful?
We've already got a knob for memory allocation profiling as a whole;
most allocations are slub allocations, so if you're looking at memory
allocation profiling you probably want slub.
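E.g., per Documentation/mm/allocation-profiling.rst, the existing controls
already cover this:

  sysctl.vm.mem_profiling=never        # boot: minimal overhead, sysctl read-only
  echo 0 > /proc/sys/vm/mem_profiling  # runtime: turn profiling off entirely

If slub overhead is the concern, the whole feature can be switched off today.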
>
> Signed-off-by: Zhenhua Huang <quic_zhenhuah@...cinc.com>
> ---
>  Documentation/mm/allocation-profiling.rst |  7 +++++-
>  include/linux/alloc_tag.h                 |  8 +++++++
>  lib/alloc_tag.c                           | 26 +++++++++++++++++------
>  mm/slub.c                                 | 10 ++++-----
>  4 files changed, 38 insertions(+), 13 deletions(-)
>
> diff --git a/Documentation/mm/allocation-profiling.rst b/Documentation/mm/allocation-profiling.rst
> index 316311240e6a..9ecae74e0365 100644
> --- a/Documentation/mm/allocation-profiling.rst
> +++ b/Documentation/mm/allocation-profiling.rst
> @@ -18,7 +18,7 @@ kconfig options:
>      missing annotation
>
>  Boot parameter:
> -  sysctl.vm.mem_profiling={0|1|never}[,compressed]
> +  sysctl.vm.mem_profiling={0|1|never}[,compressed][,noslub]
>
>    When set to "never", memory allocation profiling overhead is minimized and it
>    cannot be enabled at runtime (sysctl becomes read-only).
> @@ -30,6 +30,11 @@ Boot parameter:
>    If compression fails, a warning is issued and memory allocation profiling gets
>    disabled.
>
> +  The optional "noslub" parameter disables tracking of individual SLUB objects.
> +  In this mode, similar to page owner tracking, slub_debug must be used instead
> +  for insight into SLUB objects. While this reduces memory overhead, it also
> +  limits the ability to observe detailed SLUB allocation behavior.
> +
> sysctl:
>    /proc/sys/vm/mem_profiling
>
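For reference, with this patch the full boot line would presumably look like:

  sysctl.vm.mem_profiling=1,compressed,noslub

i.e. profiling on, compressed page-ext references, no per-object slub tracking.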
> diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
> index 8f7931eb7d16..af3c139712ce 100644
> --- a/include/linux/alloc_tag.h
> +++ b/include/linux/alloc_tag.h
> @@ -134,6 +134,13 @@ static inline bool mem_alloc_profiling_enabled(void)
>  				   &mem_alloc_profiling_key);
> }
>
> +DECLARE_STATIC_KEY_TRUE(slub_mem_alloc_profiling_key);
> +
> +static inline bool slub_mem_alloc_profiling_enabled(void)
> +{
> +	return static_key_enabled(&slub_mem_alloc_profiling_key);
> +}
> +
> static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
> {
>  	struct alloc_tag_counters v = { 0, 0 };
> @@ -227,6 +234,7 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
>
> #define DEFINE_ALLOC_TAG(_alloc_tag)
> static inline bool mem_alloc_profiling_enabled(void) { return false; }
> +static inline bool slub_mem_alloc_profiling_enabled(void) { return false; }
> static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
>  				 size_t bytes) {}
> static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
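For anyone following along, the above is the standard static-key pattern; a
minimal sketch with made-up names (example_key/example_enabled are not part
of the patch):

  #include <linux/jump_label.h>

  /* made-up key, sketch of the pattern the patch uses */
  DEFINE_STATIC_KEY_TRUE(example_key);             /* defaults to enabled */

  static inline bool example_enabled(void)
  {
          return static_key_enabled(&example_key); /* plain read of the state */
  }

  static int __init example_setup(void)
  {
          static_branch_disable(&example_key);     /* rewrites branch sites */
          return 0;
  }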
> diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
> index 0142bc916f73..b79b0d987427 100644
> --- a/lib/alloc_tag.c
> +++ b/lib/alloc_tag.c
> @@ -33,6 +33,8 @@ DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
> EXPORT_SYMBOL(mem_alloc_profiling_key);
>
> DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed);
> +DEFINE_STATIC_KEY_TRUE(slub_mem_alloc_profiling_key);
> +EXPORT_SYMBOL(slub_mem_alloc_profiling_key);
>
> struct alloc_tag_kernel_section kernel_tags = { NULL, 0 };
> unsigned long alloc_tag_ref_mask;
> @@ -710,6 +712,7 @@ static inline void free_mod_tags_mem(void) {}
>  static int __init setup_early_mem_profiling(char *str)
>  {
>  	bool compressed = false;
> +	bool noslub = false;
>  	bool enable;
>
>  	if (!str || !str[0])
> @@ -725,16 +728,19 @@ static int __init setup_early_mem_profiling(char *str)
>  		if (kstrtobool(token, &enable))
>  			return -EINVAL;
>
> -		if (str) {
> -
> -			if (strcmp(str, "compressed"))
> +		while ((token = strsep(&str, ",")) != NULL) {
> +			if (strcmp(token, "compressed") == 0)
> +				compressed = true;
> +			else if (strcmp(token, "noslub") == 0)
> +				noslub = true;
> +			else
>  				return -EINVAL;
> -
> -			compressed = true;
>  		}
>  		mem_profiling_support = true;
> -		pr_info("Memory allocation profiling is enabled %s compression and is turned %s!\n",
> -			compressed ? "with" : "without", enable ? "on" : "off");
> +		pr_info("Memory allocation profiling is enabled %s compression, %s slub tracking and is turned %s!\n",
> +			compressed ? "with" : "without",
> +			noslub ? "without" : "with",
> +			enable ? "on" : "off");
>  	}
>
>  	if (enable != mem_alloc_profiling_enabled()) {
> @@ -749,6 +755,12 @@ static int __init setup_early_mem_profiling(char *str)
>  		else
>  			static_branch_disable(&mem_profiling_compressed);
>  	}
> +	if (noslub == static_key_enabled(&slub_mem_alloc_profiling_key)) {
> +		if (noslub)
> +			static_branch_disable(&slub_mem_alloc_profiling_key);
> +		else
> +			static_branch_enable(&slub_mem_alloc_profiling_key);
> +	}
>
>  	return 0;
> }
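The strsep() loop parses sanely; roughly what it does with the new option
string (illustrative user-space-ish sketch, writable buffer assumed):

  char buf[] = "1,compressed,noslub";
  char *str = buf;
  char *token = strsep(&str, ",");  /* "1" -> kstrtobool() -> enable = true */

  while ((token = strsep(&str, ",")) != NULL) {
          /* first "compressed", then "noslub"; anything else -> -EINVAL */
  }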
> diff --git a/mm/slub.c b/mm/slub.c
> index 31e11ef256f9..e8378b092b30 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2093,7 +2093,7 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
>  	return slab_obj_exts(slab) + obj_to_index(s, slab, p);
> }
>
> -/* Should be called only if mem_alloc_profiling_enabled() */
> +/* Should be called only if slub_mem_alloc_profiling_enabled() */
> static noinline void
> __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
> {
> @@ -2102,7 +2102,7 @@ __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
>  	obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
>  	/*
>  	 * Currently obj_exts is used only for allocation profiling.
> -	 * If other users appear then mem_alloc_profiling_enabled()
> +	 * If other users appear then slub_mem_alloc_profiling_enabled()
>  	 * check should be added before alloc_tag_add().
>  	 */
>  	if (likely(obj_exts))
> @@ -2112,11 +2112,11 @@ __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
> static inline void
> alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
> {
> -	if (mem_alloc_profiling_enabled())
> +	if (slub_mem_alloc_profiling_enabled())
>  		__alloc_tagging_slab_alloc_hook(s, object, flags);
> }
>
> -/* Should be called only if mem_alloc_profiling_enabled() */
> +/* Should be called only if slub_mem_alloc_profiling_enabled() */
> static noinline void
> __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
>  			       int objects)
> @@ -2143,7 +2143,7 @@ static inline void
> alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
>  			     int objects)
>  {
> -	if (mem_alloc_profiling_enabled())
> +	if (slub_mem_alloc_profiling_enabled())
>  		__alloc_tagging_slab_free_hook(s, slab, p, objects);
> }
>
> --
> 2.34.1
>