Message-ID: <20250829154814.47015-16-frederic@kernel.org>
Date: Fri, 29 Aug 2025 17:47:56 +0200
From: Frederic Weisbecker <frederic@...nel.org>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Ingo Molnar <mingo@...hat.com>,
Johannes Weiner <hannes@...xchg.org>,
Marco Crivellari <marco.crivellari@...e.com>,
Michal Hocko <mhocko@...e.com>,
Muchun Song <muchun.song@...ux.dev>,
Peter Zijlstra <peterz@...radead.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Shakeel Butt <shakeel.butt@...ux.dev>,
Tejun Heo <tj@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Vlastimil Babka <vbabka@...e.cz>,
Waiman Long <longman@...hat.com>,
cgroups@...r.kernel.org,
linux-mm@...ck.org
Subject: [PATCH 15/33] sched/isolation: Flush memcg workqueues on cpuset isolated partition change
The HK_TYPE_DOMAIN housekeeping cpumask is now modifiable at runtime. In
order to synchronize against the memcg workqueue and make sure that no
asynchronous draining is still pending or executing on a newly made
isolated CPU, the housekeeping subsystem must flush the memcg
workqueues.

However the memcg work items can't be flushed easily, since they are
queued to the main per-CPU workqueue pool.

Solve this by creating a memcg-specific pool and by providing and using
the appropriate flushing API.
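
For illustration, a minimal sketch of the dedicated-pool pattern this
patch applies (the example_* names are hypothetical; alloc_workqueue(),
INIT_WORK(), queue_work_on() and flush_workqueue() are the real
workqueue APIs the patch uses):

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/workqueue.h>

	/* Hypothetical stand-ins for memcg_wq and the per-CPU drain work. */
	static struct workqueue_struct *example_wq;
	static struct work_struct example_work;

	static void example_drain(struct work_struct *work)
	{
		/* ... per-CPU draining would happen here ... */
	}

	static int __init example_init(void)
	{
		/* A dedicated pool can be flushed without touching system_wq. */
		example_wq = alloc_workqueue("example", 0, 0);
		if (!example_wq)
			return -ENOMEM;
		INIT_WORK(&example_work, example_drain);
		return 0;
	}

	/* Queue on a specific CPU, on the dedicated pool only. */
	static void example_queue(int cpu)
	{
		queue_work_on(cpu, example_wq, &example_work);
	}

	/* Wait for every work item queued on the dedicated pool to finish. */
	static void example_flush(void)
	{
		flush_workqueue(example_wq);
	}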
Acked-by: Shakeel Butt <shakeel.butt@...ux.dev>
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
---
include/linux/memcontrol.h | 4 ++++
kernel/sched/isolation.c | 2 ++
kernel/sched/sched.h | 1 +
mm/memcontrol.c | 12 +++++++++++-
4 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 785173aa0739..8b23ff000473 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1048,6 +1048,8 @@ static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
 	return id;
 }
 
+void mem_cgroup_flush_workqueue(void);
+
 extern int mem_cgroup_init(void);
 
 #else /* CONFIG_MEMCG */
@@ -1453,6 +1455,8 @@ static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
 	return 0;
 }
 
+static inline void mem_cgroup_flush_workqueue(void) { }
+
 static inline int mem_cgroup_init(void) { return 0; }
 
 #endif /* CONFIG_MEMCG */
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index 48f3b6b20604..e85f402b103a 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -124,6 +124,8 @@ int housekeeping_update(struct cpumask *mask, enum hk_type type)
 
 	synchronize_rcu();
 
+	mem_cgroup_flush_workqueue();
+
 	kfree(old);
 
 	return 0;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d3512138027b..1dad1ac7fc61 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -44,6 +44,7 @@
 #include <linux/lockdep_api.h>
 #include <linux/lockdep.h>
 #include <linux/memblock.h>
+#include <linux/memcontrol.h>
 #include <linux/minmax.h>
 #include <linux/mm.h>
 #include <linux/module.h>
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2649d6c09160..1aa2dfa32ccd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -95,6 +95,8 @@ static bool cgroup_memory_nokmem __ro_after_init;
 
 /* BPF memory accounting disabled? */
 static bool cgroup_memory_nobpf __ro_after_init;
 
+static struct workqueue_struct *memcg_wq __ro_after_init;
+
 static struct kmem_cache *memcg_cachep;
 static struct kmem_cache *memcg_pn_cachep;
@@ -1974,7 +1976,7 @@ static void schedule_drain_work(int cpu, struct work_struct *work)
 {
 	guard(rcu)();
 	if (!cpu_is_isolated(cpu))
-		schedule_work_on(cpu, work);
+		queue_work_on(cpu, memcg_wq, work);
 }
 
 /*
@@ -5071,6 +5073,11 @@ void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 	refill_stock(memcg, nr_pages);
 }
 
+void mem_cgroup_flush_workqueue(void)
+{
+	flush_workqueue(memcg_wq);
+}
+
 static int __init cgroup_memory(char *s)
 {
 	char *token;
@@ -5113,6 +5120,9 @@ int __init mem_cgroup_init(void)
 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
 				  memcg_hotplug_cpu_dead);
 
+	memcg_wq = alloc_workqueue("memcg", 0, 0);
+	WARN_ON(!memcg_wq);
+
 	for_each_possible_cpu(cpu) {
 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
 			  drain_local_memcg_stock);
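
For context, a sketch of the ordering that makes the flush sufficient,
as I read the hunks above (editorial comment, not part of the patch):

	/*
	 * schedule_drain_work() samples cpu_is_isolated() inside an RCU
	 * read-side critical section, while housekeeping_update() does:
	 *
	 *	update the HK_TYPE_DOMAIN mask
	 *	synchronize_rcu();		// wait out in-flight queuers
	 *	mem_cgroup_flush_workqueue();	// drain what they queued
	 *
	 * A queuer running while the mask changes either saw the old mask,
	 * in which case its work item is already on memcg_wq and the flush
	 * waits for it, or sees the new mask and skips the isolated CPU.
	 */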
--
2.51.0