Message-ID: <5qi2llyzf7gklncflo6gxoozljbm4h3tpnuv4u4ej4ztysvi6f@x44v7nz2wdzd>
Date: Thu, 18 Sep 2025 19:49:01 -0700
From: Shakeel Butt <shakeel.butt@...ux.dev>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Tejun Heo <tj@...nel.org>, Johannes Weiner <hannes@...xchg.org>, 
	Michal Hocko <mhocko@...nel.org>, Roman Gushchin <roman.gushchin@...ux.dev>, 
	Muchun Song <muchun.song@...ux.dev>, Alexei Starovoitov <ast@...nel.org>, 
	Peilin Ye <yepeilin@...gle.com>, Kumar Kartikeya Dwivedi <memxor@...il.com>, bpf@...r.kernel.org, 
	linux-mm@...ck.org, cgroups@...r.kernel.org, linux-kernel@...r.kernel.org, 
	Meta kernel team <kernel-team@...a.com>
Subject: Re: [PATCH] memcg: skip cgroup_file_notify if spinning is not allowed

On Fri, Sep 05, 2025 at 01:16:06PM -0700, Shakeel Butt wrote:
> Generally memcg charging is allowed from all contexts, including NMI,
> where even spinning on a spinlock can cause locking issues. However,
> one call chain was missed when support for memcg charging from any
> context was added: try_charge_memcg() -> memcg_memory_event() ->
> cgroup_file_notify().
> 
> The possible function call tree under cgroup_file_notify() can acquire
> many different spinlocks in spinning mode, among them
> cgroup_file_kn_lock, kernfs_notify_lock and pool_workqueue's lock. So
> let's just skip cgroup_file_notify() from memcg charging if the
> context does not allow spinning.
> 
> Signed-off-by: Shakeel Butt <shakeel.butt@...ux.dev>

Here I am pasting the irq_work based prototype, which is build-tested
only for now; I am sharing it early to show how it looks. Overall I
think it adds too much complexity that is not worth it. We have to add
a per-cpu irq_work, and then for each memcg we have to add a per-cpu
lockless node to queue the deferred event update. More reasoning is
also needed to make sure the updates are not missed by the deferred
work.

Anyway, this is the early prototype. Unless there are comments on how
to make it better, I will ask Andrew to just pick the previous patch I
sent.
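
For reference, the deferral boils down to the usual lockless-list plus
irq_work pattern. A condensed, illustrative sketch of the two halves
(variable names shortened from the patch below; see the patch for the
real declarations):

	/* producer: called from a context that must not spin */
	atomic_inc(&node->max_events);
	/* an add onto an empty list means the irq_work needs a kick */
	if (llist_add(&node->lnode, &pcpu->memcg_llist))
		irq_work_queue(&pcpu->work);

	/* consumer: irq_work callback, runs where spinning is fine */
	while ((n = llist_del_first_init(&pcpu->memcg_llist))) {
		node = container_of(n, struct deferred_events_percpu, lnode);
		num = atomic_xchg(&node->max_events, 0);
		while (num--)
			memcg_memory_event(node->memcg_owner, MEMCG_MAX);
	}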


From d58d772f306454f0dffa94bfb32195496c450892 Mon Sep 17 00:00:00 2001
From: Shakeel Butt <shakeel.butt@...ux.dev>
Date: Thu, 18 Sep 2025 19:25:37 -0700
Subject: [PATCH] memcg: add support for deferred max memcg event

Signed-off-by: Shakeel Butt <shakeel.butt@...ux.dev>
---
 include/linux/memcontrol.h |  3 ++
 mm/memcontrol.c            | 85 ++++++++++++++++++++++++++++++++++++--
 2 files changed, 84 insertions(+), 4 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 16fe0306e50e..3f803957e05d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -69,6 +69,7 @@ struct mem_cgroup_id {
 	refcount_t ref;
 };
 
+struct deferred_events_percpu;
 struct memcg_vmstats_percpu;
 struct memcg1_events_percpu;
 struct memcg_vmstats;
@@ -268,6 +269,8 @@ struct mem_cgroup {
 
 	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
 
+	struct deferred_events_percpu __percpu *deferred_events;
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 	struct list_head cgwb_list;
 	struct wb_domain cgwb_domain;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e090f29eb03b..a34cb728c5c6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -132,6 +132,63 @@ bool mem_cgroup_kmem_disabled(void)
 	return cgroup_memory_nokmem;
 }
 
+struct deferred_events_percpu {
+	atomic_t max_events;
+	struct mem_cgroup *memcg_owner;
+	struct llist_node lnode;
+};
+
+struct defer_memcg_events {
+	struct llist_head memcg_llist;
+	struct irq_work work;
+};
+
+static void process_deferred_events(struct irq_work *work)
+{
+	struct defer_memcg_events *events = container_of(work,
+						struct defer_memcg_events, work);
+	struct llist_node *lnode;
+
+	while ((lnode = llist_del_first_init(&events->memcg_llist))) {
+		int i, num;
+		struct deferred_events_percpu *eventsc;
+
+		eventsc = container_of(lnode, struct deferred_events_percpu, lnode);
+
+		if (!atomic_read(&eventsc->max_events))
+			continue;
+		num = atomic_xchg(&eventsc->max_events, 0);
+		if (!num)
+			continue;
+		for (i = 0; i < num; i++)
+			memcg_memory_event(eventsc->memcg_owner, MEMCG_MAX);
+	}
+}
+
+static DEFINE_PER_CPU(struct defer_memcg_events, postpone_events) = {
+	.memcg_llist = LLIST_HEAD_INIT(memcg_llist),
+	.work = IRQ_WORK_INIT(process_deferred_events),
+};
+
+static void memcg_memory_max_event_queue(struct mem_cgroup *memcg)
+{
+	int cpu;
+	struct defer_memcg_events *devents;
+	struct deferred_events_percpu *dmemcg_events;
+
+	cpu = get_cpu();
+	devents = per_cpu_ptr(&postpone_events, cpu);
+	dmemcg_events = per_cpu_ptr(memcg->deferred_events, cpu);
+
+	atomic_inc(&dmemcg_events->max_events);
+	/* Barrier here to make sure that if the following llist_add returns
+	 * false, the corresponding llist_del_first_init will see our increment. */
+	if (llist_add(&dmemcg_events->lnode, &devents->memcg_llist))
+		irq_work_queue(&devents->work);
+
+	put_cpu();
+}
+
 static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
 
 static void obj_cgroup_release(struct percpu_ref *ref)
@@ -2307,12 +2364,13 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	bool drained = false;
 	bool raised_max_event = false;
 	unsigned long pflags;
+	bool allow_spinning = gfpflags_allow_spinning(gfp_mask);
 
 retry:
 	if (consume_stock(memcg, nr_pages))
 		return 0;
 
-	if (!gfpflags_allow_spinning(gfp_mask))
+	if (!allow_spinning)
 		/* Avoid the refill and flush of the older stock */
 		batch = nr_pages;
 
@@ -2348,7 +2406,10 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	if (!gfpflags_allow_blocking(gfp_mask))
 		goto nomem;
 
-	memcg_memory_event(mem_over_limit, MEMCG_MAX);
+	if (allow_spinning)
+		memcg_memory_event(mem_over_limit, MEMCG_MAX);
+	else
+		memcg_memory_max_event_queue(mem_over_limit);
 	raised_max_event = true;
 
 	psi_memstall_enter(&pflags);
@@ -2414,8 +2475,12 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	 * If the allocation has to be enforced, don't forget to raise
 	 * a MEMCG_MAX event.
 	 */
-	if (!raised_max_event)
-		memcg_memory_event(mem_over_limit, MEMCG_MAX);
+	if (!raised_max_event) {
+		if (allow_spinning)
+			memcg_memory_event(mem_over_limit, MEMCG_MAX);
+		else
+			memcg_memory_max_event_queue(mem_over_limit);
+	}
 
 	/*
 	 * The allocation either can't fail or will lead to more memory
@@ -3689,6 +3754,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 		free_mem_cgroup_per_node_info(memcg->nodeinfo[node]);
 	memcg1_free_events(memcg);
 	kfree(memcg->vmstats);
+	free_percpu(memcg->deferred_events);
 	free_percpu(memcg->vmstats_percpu);
 	kfree(memcg);
 }
@@ -3704,6 +3770,7 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
 {
 	struct memcg_vmstats_percpu *statc;
 	struct memcg_vmstats_percpu __percpu *pstatc_pcpu;
+	struct deferred_events_percpu *devents;
 	struct mem_cgroup *memcg;
 	int node, cpu;
 	int __maybe_unused i;
@@ -3729,6 +3796,11 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
 	if (!memcg->vmstats_percpu)
 		goto fail;
 
+	memcg->deferred_events = alloc_percpu_gfp(struct deferred_events_percpu,
+						  GFP_KERNEL_ACCOUNT);
+	if (!memcg->deferred_events)
+		goto fail;
+
 	if (!memcg1_alloc_events(memcg))
 		goto fail;
 
@@ -3738,6 +3810,11 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
 		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
 		statc->parent_pcpu = parent ? pstatc_pcpu : NULL;
 		statc->vmstats = memcg->vmstats;
+
+		devents = per_cpu_ptr(memcg->deferred_events, cpu);
+		atomic_set(&devents->max_events, 0);
+		devents->memcg_owner = memcg;
+		init_llist_node(&devents->lnode);
 	}
 
 	for_each_node(node)
-- 
2.47.3

