lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Date: Mon, 17 Jun 2024 23:32:50 +0800
From: David Wang <00107082@....com>
To: surenb@...gle.com,
	kent.overstreet@...ux.dev,
	akpm@...ux-foundation.org
Cc: linux-mm@...ck.org,
	linux-kernel@...r.kernel.org,
	David Wang <00107082@....com>
Subject: [PATCH] Add accumulated call counter for memory allocation profiling

Accumulated call counter can be used to evaluate the rate
of memory allocation via delta(counters)/delta(time).
This metric can help analyze performance behaviour,
e.g. tuning cache size, etc.

Signed-off-by: David Wang <00107082@....com>
---
 include/linux/alloc_tag.h | 11 +++++++----
 lib/alloc_tag.c           |  7 +++----
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index abd24016a900..62734244c0b9 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -18,6 +18,7 @@
 struct alloc_tag_counters {
 	u64 bytes;
 	u64 calls;
+	u64 accu_calls;
 };
 
 /*
@@ -102,14 +103,15 @@ static inline bool mem_alloc_profiling_enabled(void)
 
 static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
 {
-	struct alloc_tag_counters v = { 0, 0 };
+	struct alloc_tag_counters v = { 0, 0, 0 };
 	struct alloc_tag_counters *counter;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		counter = per_cpu_ptr(tag->counters, cpu);
-		v.bytes += counter->bytes;
-		v.calls += counter->calls;
+		counter		= per_cpu_ptr(tag->counters, cpu);
+		v.bytes		+= counter->bytes;
+		v.calls		+= counter->calls;
+		v.accu_calls	+= counter->accu_calls;
 	}
 
 	return v;
@@ -145,6 +147,7 @@ static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag
 	 * counter because when we free each part the counter will be decremented.
 	 */
 	this_cpu_inc(tag->counters->calls);
+	this_cpu_inc(tag->counters->accu_calls);
 }
 
 static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 11ed973ac359..c4059362d828 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -66,8 +66,8 @@ static void allocinfo_stop(struct seq_file *m, void *arg)
 static void print_allocinfo_header(struct seq_buf *buf)
 {
 	/* Output format version, so we can change it. */
-	seq_buf_printf(buf, "allocinfo - version: 1.0\n");
-	seq_buf_printf(buf, "#     <size>  <calls> <tag info>\n");
+	seq_buf_printf(buf, "allocinfo - version: 1.1\n");
+	seq_buf_printf(buf, "#     <size>  <calls> <tag info> <accumulated calls>\n");
 }
 
 static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
@@ -78,8 +78,7 @@ static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
 
 	seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
 	codetag_to_text(out, ct);
-	seq_buf_putc(out, ' ');
-	seq_buf_putc(out, '\n');
+	seq_buf_printf(out, " %llu\n", counter.accu_calls);
 }
 
 static int allocinfo_show(struct seq_file *m, void *arg)
-- 
2.39.2


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ