Message-ID: <175197569695.977073.14996860364063727757.stgit@mhiramat.tok.corp.google.com>
Date: Tue, 8 Jul 2025 20:54:57 +0900
From: "Masami Hiramatsu (Google)" <mhiramat@...nel.org>
To: Steven Rostedt <rostedt@...dmis.org>,
Masami Hiramatsu <mhiramat@...nel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org
Subject: [PATCH 2/2] tracing: Allocate field->type only if it needs to be sanitized
From: Masami Hiramatsu (Google) <mhiramat@...nel.org>
__trace_define_field() always allocates field->type so that the
type string can be sanitized, but in almost all cases the type
string contains nothing that needs sanitizing. To reduce this
memory usage, allocate field->type only when the type string
actually contains something that needs to be sanitized.
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@...nel.org>
---
kernel/trace/trace.h | 1 +
kernel/trace/trace_events.c | 28 ++++++++++++++++++++--------
2 files changed, 21 insertions(+), 8 deletions(-)
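
Not part of the patch: a minimal user-space sketch of the
allocation policy above, for illustration only. It assumes
ATTRIBUTE_STR is a marker substring such as "__attribute__" that
sanitize_field_type() rewrites; the real definitions come from the
previous patch, and the kernel code uses kstrdup()/kfree() where
this sketch just prints which path would be taken.

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* Assumed marker; the real ATTRIBUTE_STR is defined in patch 1/2. */
	#define ATTRIBUTE_STR "__attribute__"

	/* Only type strings containing the marker need a writable copy. */
	static bool need_sanitize_field_type(const char *type)
	{
		return strstr(type, ATTRIBUTE_STR) != NULL;
	}

	int main(void)
	{
		const char *types[] = {
			"unsigned int",                     /* common case: reused as-is */
			"char __attribute__((user)) *",     /* rare case: duplicated */
		};

		for (size_t i = 0; i < 2; i++) {
			if (need_sanitize_field_type(types[i]))
				printf("dup + sanitize: %s\n", types[i]); /* kstrdup() path */
			else
				printf("reuse as-is  : %s\n", types[i]); /* no allocation */
		}
		return 0;
	}

The alloc_type bit in the patch records which path was taken so
that trace_destroy_fields() only frees type strings that were
actually duplicated.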
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index bd084953a98b..cd7be4ce6ee9 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1468,6 +1468,7 @@ struct ftrace_event_field {
int size;
unsigned int is_signed:1;
unsigned int needs_test:1;
+ unsigned int alloc_type:1;
int len;
};
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 2f950aceb783..d95f24d61875 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -145,6 +145,11 @@ static void sanitize_field_type(char *type)
}
}
+static bool need_sanitize_field_type(const char *type)
+{
+ return !!strstr(type, ATTRIBUTE_STR);
+}
+
static int __trace_define_field(struct list_head *head, const char *__type,
const char *name, int offset, int size,
int is_signed, int filter_type, int len,
@@ -159,16 +164,22 @@ static int __trace_define_field(struct list_head *head, const char *__type,
field->name = name;
- type = kstrdup(__type, GFP_KERNEL);
- if (!type) {
- kfree(field);
- return -ENOMEM;
+ if (need_sanitize_field_type(__type)) {
+ type = kstrdup(__type, GFP_KERNEL);
+ if (!type) {
+ kfree(field);
+ return -ENOMEM;
+ }
+ sanitize_field_type(type);
+ field->type = type;
+ field->alloc_type = 1;
+ } else {
+ field->type = __type;
+ field->alloc_type = 0;
}
- sanitize_field_type(type);
- field->type = type;
if (filter_type == FILTER_OTHER)
- field->filter_type = filter_assign_type(type);
+ field->filter_type = filter_assign_type(field->type);
else
field->filter_type = filter_type;
@@ -266,7 +277,8 @@ static void trace_destroy_fields(struct trace_event_call *call)
head = trace_get_fields(call);
list_for_each_entry_safe(field, next, head, link) {
list_del(&field->link);
- kfree(field->type);
+ if (field->alloc_type)
+ kfree(field->type);
kmem_cache_free(field_cachep, field);
}
}