Message-ID: <CAJuCfpFvzO5++MQQB6=dTFzumBmBtdS+vZEU+iF4wMAYWVDqQg@mail.gmail.com>
Date: Wed, 18 Dec 2024 10:22:53 -0800
From: Suren Baghdasaryan <surenb@...gle.com>
To: David Wang <00107082@....com>
Cc: kent.overstreet@...ux.dev, akpm@...ux-foundation.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2] lib/alloc_tag: Add accumulative call counter for
memory allocation profiling
On Wed, Dec 18, 2024 at 4:49 AM David Wang <00107082@....com> wrote:
>
> Hi,
>
> I found another usage/benefit for accumulative counters:
>
> On my system, /proc/allocinfo yields about 5065 lines, of which roughly
> two thirds have an accumulative counter of *0*,
> meaning those call sites have seen no allocation activity at all. (right?)
> It is quite a waste to keep emitting those items which are *not alive yet*.
> With the following additional change, /proc/allocinfo has only 1684 lines
> on my system:
>
> --- a/lib/alloc_tag.c
> +++ b/lib/alloc_tag.c
> @@ -95,8 +95,11 @@ static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
> struct alloc_tag_counters counter = alloc_tag_read(tag);
> s64 bytes = counter.bytes;
>
> + if (counter.accu_calls == 0)
> + return;
> seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
>
>
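> For illustration only, a surviving line keeps the v1.1 format with the
> trailing accumulative counter (the values and tag info below are made up):
>
>         4096        1 mm/dmapool.c:338 func:pool_alloc_page 7
>
> whereas a line whose accumulative counter is still 0 is suppressed entirely.
>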
> I think this is quite an improvement worth pursuing.
> (counter.calls could also be used to filter out "inactive" items, but
> lines that keep disappearing and reappearing can confuse monitoring
> systems, whereas the accumulative counter never goes backwards.)
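>
> As a rough illustration (not part of the patch; the parsing is
> deliberately naive and assumes the v1.1 format with the accumulative
> counter as the last field of each line), a monitor could estimate the
> system-wide allocation rate from two snapshots like this:
>
> #include <stdio.h>
> #include <stdlib.h>
> #include <string.h>
> #include <unistd.h>
>
> /* Sum the trailing <accumulative calls> field over all data lines
>  * of /proc/allocinfo, skipping the version and "#" header lines. */
> static unsigned long long total_accu_calls(void)
> {
>         FILE *f = fopen("/proc/allocinfo", "r");
>         char line[4096];
>         unsigned long long total = 0;
>
>         if (!f)
>                 return 0;
>         while (fgets(line, sizeof(line), f)) {
>                 char *last;
>
>                 if (line[0] == 'a' || line[0] == '#')
>                         continue;       /* "allocinfo - version" / column headers */
>                 last = strrchr(line, ' ');
>                 if (last)
>                         total += strtoull(last + 1, NULL, 10);
>         }
>         fclose(f);
>         return total;
> }
>
> int main(void)
> {
>         unsigned long long before = total_accu_calls();
>
>         sleep(10);
>         printf("~%llu allocs/sec\n", (total_accu_calls() - before) / 10);
>         return 0;
> }
>
> The same delta could of course be computed per line, keyed on the
> <tag info> column, to rank call sites by allocation rate.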
Please see discussion at
https://lore.kernel.org/all/20241211085616.2471901-1-quic_zhenhuah@quicinc.com/
>
>
> Thanks~
> David
>
>
> At 2024-09-13 13:57:29, "David Wang" <00107082@....com> wrote:
> >An accumulative allocation counter can be used to evaluate memory
> >allocation behavior/rate via delta(counters)/delta(time).
> >It can help analyze performance issues and identify the top modules
> >with a high rate of memory allocation activity.
> >Since this incurs extra performance and memory overhead, introduce
> >the kconfig option MEM_ALLOC_PROFILING_ACCUMULATIVE_CALL_COUNTER to gate it.
> >
> >Signed-off-by: David Wang <00107082@....com>
> >---
> >Changes in v2:
> >- Add kconfig MEM_ALLOC_PROFILING_ACCUMULATIVE_CALL_COUNTER as
> >suggested by "Kent Overstreet <kent.overstreet@...ux.dev>"
> >
> >---
> > include/linux/alloc_tag.h | 9 +++++++++
> > lib/Kconfig.debug | 10 ++++++++++
> > lib/alloc_tag.c | 10 +++++++++-
> > 3 files changed, 28 insertions(+), 1 deletion(-)
> >
> >diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
> >index 8c61ccd161ba..5a94d61205b5 100644
> >--- a/include/linux/alloc_tag.h
> >+++ b/include/linux/alloc_tag.h
> >@@ -18,6 +18,9 @@
> > struct alloc_tag_counters {
> > u64 bytes;
> > u64 calls;
> >+#ifdef CONFIG_MEM_ALLOC_PROFILING_ACCUMULATIVE_CALL_COUNTER
> >+ u64 accu_calls;
> >+#endif
> > };
> >
> > /*
> >@@ -103,13 +106,16 @@ static inline bool mem_alloc_profiling_enabled(void)
> > static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
> > {
> > struct alloc_tag_counters v = { 0, 0 };
> > struct alloc_tag_counters *counter;
> > int cpu;
> >
> > for_each_possible_cpu(cpu) {
> > counter = per_cpu_ptr(tag->counters, cpu);
> > v.bytes += counter->bytes;
> > v.calls += counter->calls;
> >+#ifdef CONFIG_MEM_ALLOC_PROFILING_ACCUMULATIVE_CALL_COUNTER
> >+ v.accu_calls += counter->accu_calls;
> >+#endif
> > }
> >
> > return v;
> >@@ -145,6 +151,9 @@ static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag
> > * counter because when we free each part the counter will be decremented.
> > */
> > this_cpu_inc(tag->counters->calls);
> >+#ifdef CONFIG_MEM_ALLOC_PROFILING_ACCUMULATIVE_CALL_COUNTER
> >+ this_cpu_inc(tag->counters->accu_calls);
> >+#endif
> > }
> >
> > static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
> >diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
> >index a30c03a66172..1e9974d28510 100644
> >--- a/lib/Kconfig.debug
> >+++ b/lib/Kconfig.debug
> >@@ -1000,6 +1000,16 @@ config MEM_ALLOC_PROFILING_DEBUG
> > Adds warnings with helpful error messages for memory allocation
> > profiling.
> >
> >+config MEM_ALLOC_PROFILING_ACCUMULATIVE_CALL_COUNTER
> >+ bool "Enable accumulative allocation counters"
> >+ default n
> >+ depends on MEM_ALLOC_PROFILING
> >+ help
> >+ Record an accumulative call counter for each memory allocation call
> >+ site. This incurs a small extra performance and memory overhead.
> >+ The counter can be used to evaluate the allocation activity/rate
> >+ via delta(counter)/delta(time).
> >+
> > source "lib/Kconfig.kasan"
> > source "lib/Kconfig.kfence"
> > source "lib/Kconfig.kmsan"
> >diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
> >index 81e5f9a70f22..6b03edb04e7d 100644
> >--- a/lib/alloc_tag.c
> >+++ b/lib/alloc_tag.c
> >@@ -66,8 +66,12 @@ static void allocinfo_stop(struct seq_file *m, void *arg)
> > static void print_allocinfo_header(struct seq_buf *buf)
> > {
> > /* Output format version, so we can change it. */
> >- seq_buf_printf(buf, "allocinfo - version: 1.0\n");
> >+ seq_buf_printf(buf, "allocinfo - version: 1.1\n");
> >+#ifdef CONFIG_MEM_ALLOC_PROFILING_ACCUMULATIVE_CALL_COUNTER
> >+ seq_buf_printf(buf, "# <size> <calls> <tag info> <accumulative calls>\n");
> >+#else
> > seq_buf_printf(buf, "# <size> <calls> <tag info>\n");
> >+#endif
> > }
> >
> > static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
> >@@ -78,8 +82,12 @@ static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
> >
> > seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
> > codetag_to_text(out, ct);
> >+#ifdef CONFIG_MEM_ALLOC_PROFILING_ACCUMULATIVE_CALL_COUNTER
> >+ seq_buf_printf(out, " %llu\n", counter.accu_calls);
> >+#else
> > seq_buf_putc(out, ' ');
> > seq_buf_putc(out, '\n');
> >+#endif
> > }
> >
> > static int allocinfo_show(struct seq_file *m, void *arg)
> >--
> >2.39.2