Message-Id: <20230324223028.172-11-beaub@linux.microsoft.com>
Date: Fri, 24 Mar 2023 15:30:27 -0700
From: Beau Belgrave <beaub@...ux.microsoft.com>
To: rostedt@...dmis.org, mhiramat@...nel.org,
mathieu.desnoyers@...icios.com, dcook@...ux.microsoft.com,
alanau@...ux.microsoft.com, brauner@...nel.org,
akpm@...ux-foundation.org, ebiederm@...ssion.com,
keescook@...omium.org, tglx@...utronix.de
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-trace-kernel@...r.kernel.org
Subject: [PATCH v9 10/11] tracing/user_events: Charge event allocs to cgroups
Operators need a way to limit how much memory cgroups use. User events need
to be included in that accounting. Fix this by using GFP_KERNEL_ACCOUNT
for allocations generated by user programs for user_event tracing.
Signed-off-by: Beau Belgrave <beaub@...ux.microsoft.com>
---
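Note (illustration only, not part of the patch): GFP_KERNEL_ACCOUNT is
defined in the kernel's gfp headers as GFP_KERNEL plus __GFP_ACCOUNT, so
allocations made with it are charged to the memory cgroup of the allocating
task and uncharged when freed. The enabler-dup hunk below cannot use a
sleeping allocation, so it keeps GFP_NOWAIT and ORs in __GFP_ACCOUNT
directly instead of switching to GFP_KERNEL_ACCOUNT. A minimal sketch of
the relationship (paraphrased from the gfp headers and this diff):

	/* Sleeping allocation, charged to the caller's memcg: */
	#define GFP_KERNEL_ACCOUNT	(GFP_KERNEL | __GFP_ACCOUNT)

	/* Non-sleeping path builds the equivalent flags by hand: */
	enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);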
kernel/trace/trace_events_user.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index e4ee25d16f3b..222f2eb59c7c 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -442,7 +442,7 @@ static bool user_event_enabler_dup(struct user_event_enabler *orig,
if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
return true;
- enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT);
+ enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);
if (!enabler)
return false;
@@ -502,7 +502,7 @@ static struct user_event_mm *user_event_mm_create(struct task_struct *t)
struct user_event_mm *user_mm;
unsigned long flags;
- user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL);
+ user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);
if (!user_mm)
return NULL;
@@ -662,7 +662,7 @@ static struct user_event_enabler
if (!user_mm)
return NULL;
- enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
+ enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);
if (!enabler)
goto out;
@@ -870,7 +870,7 @@ static int user_event_add_field(struct user_event *user, const char *type,
struct ftrace_event_field *field;
int validator_flags = 0;
- field = kmalloc(sizeof(*field), GFP_KERNEL);
+ field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);
if (!field)
return -ENOMEM;
@@ -889,7 +889,7 @@ static int user_event_add_field(struct user_event *user, const char *type,
if (strstr(type, "char") != NULL)
validator_flags |= VALIDATOR_ENSURE_NULL;
- validator = kmalloc(sizeof(*validator), GFP_KERNEL);
+ validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);
if (!validator) {
kfree(field);
@@ -1175,7 +1175,7 @@ static int user_event_create_print_fmt(struct user_event *user)
len = user_event_set_print_fmt(user, NULL, 0);
- print_fmt = kmalloc(len, GFP_KERNEL);
+ print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);
if (!print_fmt)
return -ENOMEM;
@@ -1508,7 +1508,7 @@ static int user_event_create(const char *raw_command)
raw_command += USER_EVENTS_PREFIX_LEN;
raw_command = skip_spaces(raw_command);
- name = kstrdup(raw_command, GFP_KERNEL);
+ name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT);
if (!name)
return -ENOMEM;
@@ -1704,7 +1704,7 @@ static int user_event_parse(struct user_event_group *group, char *name,
return 0;
}
- user = kzalloc(sizeof(*user), GFP_KERNEL);
+ user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);
if (!user)
return -ENOMEM;
@@ -1874,7 +1874,7 @@ static int user_events_open(struct inode *node, struct file *file)
if (!group)
return -ENOENT;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT);
if (!info)
return -ENOMEM;
@@ -1927,7 +1927,7 @@ static int user_events_ref_add(struct user_event_file_info *info,
size = struct_size(refs, events, count + 1);
- new_refs = kzalloc(size, GFP_KERNEL);
+ new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT);
if (!new_refs)
return -ENOMEM;
--
2.25.1