Message-ID: <20250424120937.96164-4-link@vivo.com>
Date: Thu, 24 Apr 2025 20:09:29 +0800
From: Huan Yang <link@...o.com>
To: Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Shakeel Butt <shakeel.butt@...ux.dev>,
Muchun Song <muchun.song@...ux.dev>,
Andrew Morton <akpm@...ux-foundation.org>,
Petr Mladek <pmladek@...e.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Huan Yang <link@...o.com>,
Francesco Valla <francesco@...la.it>,
Huang Shijie <shijie@...amperecomputing.com>,
KP Singh <kpsingh@...nel.org>,
"Paul E. McKenney" <paulmck@...nel.org>,
Rasmus Villemoes <linux@...musvillemoes.dk>,
"Uladzislau Rezki (Sony)" <urezki@...il.com>,
Guo Weikang <guoweikang.kernel@...il.com>,
Raul E Rangel <rrangel@...omium.org>,
cgroups@...r.kernel.org,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Boqun Feng <boqun.feng@...il.com>,
Geert Uytterhoeven <geert@...ux-m68k.org>,
Paul Moore <paul@...l-moore.com>,
"Mike Rapoport (Microsoft)" <rppt@...nel.org>
Cc: opensource.kernel@...o.com
Subject: [PATCH v2 3/3] mm/memcg: introduce mem_cgroup_early_init
When cgroup_init() creates root_mem_cgroup through the css_alloc
callback, some critical resources might not be fully initialized yet,
forcing later operations to perform conditional checks for resource
availability.
This patch introduces mem_cgroup_early_init() to address the init
order: it is invoked from start_kernel() before cgroup_init(), so,
unlike mem_cgroup_init() which runs as a subsys_initcall,
mem_cgroup_early_init() can prepare key resources before
root_mem_cgroup is allocated.
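
For illustration (a simplified sketch, not part of the patch; the call
sites are from init/main.c and mm/memcontrol.c, with unrelated calls
elided), the resulting boot ordering is:

	start_kernel()
		mem_cgroup_early_init();  /* creates memcg_cachep and memcg_pn_cachep */
		cgroup_init();            /* allocates root_mem_cgroup from those caches */
	...
	mem_cgroup_init();                /* subsys_initcall; no longer creates the caches */

Since the caches are guaranteed to exist by the time cgroup_init()
runs, the allocation sites can drop the likely(memcg_cachep) ? ... :
kzalloc() fallback that previously covered the early-boot window.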
Signed-off-by: Huan Yang <link@...o.com>
Suggested-by: Shakeel Butt <shakeel.butt@...ux.dev>
---
include/linux/memcontrol.h | 5 +++++
init/main.c | 2 ++
mm/memcontrol.c | 40 +++++++++++++++++++++++---------------
3 files changed, 31 insertions(+), 16 deletions(-)
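
A note on error handling (editor's sketch, not part of the patch;
'cachep' is a placeholder name): both caches are created with
SLAB_PANIC, so a creation failure panics the kernel during early boot
and mem_cgroup_early_init() needs no return value or NULL checks:

	/*
	 * With SLAB_PANIC, kmem_cache_create() either succeeds or
	 * panics, so the returned pointer is guaranteed non-NULL.
	 */
	cachep = kmem_cache_create("mem_cgroup", size, 0,
				   SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);

	/*
	 * Individual allocations from the cache can still fail and are
	 * still checked, e.g. the !pn test after kmem_cache_alloc_node()
	 * in alloc_mem_cgroup_per_node_info().
	 */

This is the usual pattern for boot-time caches the kernel cannot
operate without.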
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5264d148bdd9..231f3c577294 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1057,6 +1057,7 @@ static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
return id;
}
+extern void mem_cgroup_early_init(void);
#else /* CONFIG_MEMCG */
#define MEM_CGROUP_ID_SHIFT 0
@@ -1472,6 +1473,10 @@ static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
{
return 0;
}
+
+static inline void mem_cgroup_early_init(void)
+{
+}
#endif /* CONFIG_MEMCG */
/*
diff --git a/init/main.c b/init/main.c
index 6b14e6116a1f..fd59d5ba2dc7 100644
--- a/init/main.c
+++ b/init/main.c
@@ -50,6 +50,7 @@
#include <linux/writeback.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
+#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/efi.h>
#include <linux/tick.h>
@@ -1087,6 +1088,7 @@ void start_kernel(void)
nsfs_init();
pidfs_init();
cpuset_init();
+ mem_cgroup_early_init();
cgroup_init();
taskstats_init_early();
delayacct_init();
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e8797382aeb4..bef1be3aad6f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3602,10 +3602,8 @@ static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
struct mem_cgroup_per_node *pn;
- pn = likely(memcg_pn_cachep) ?
- kmem_cache_alloc_node(memcg_pn_cachep,
- GFP_KERNEL | __GFP_ZERO, node) :
- kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
+ pn = kmem_cache_alloc_node(memcg_pn_cachep, GFP_KERNEL | __GFP_ZERO,
+ node);
if (!pn)
return false;
@@ -3658,10 +3656,7 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
int __maybe_unused i;
long error;
- memcg = likely(memcg_cachep) ?
- kmem_cache_zalloc(memcg_cachep, GFP_KERNEL) :
- kzalloc(struct_size(memcg, nodeinfo, nr_node_ids),
- GFP_KERNEL);
+ memcg = kmem_cache_zalloc(memcg_cachep, GFP_KERNEL);
if (!memcg)
return ERR_PTR(-ENOMEM);
@@ -5037,6 +5032,27 @@ static int __init cgroup_memory(char *s)
}
__setup("cgroup.memory=", cgroup_memory);
+/*
+ * Invoked from start_kernel() before cgroup_init() creates
+ * root_mem_cgroup; prepare anything its allocation needs here.
+ * This currently creates:
+ * 1) memcg_cachep - kmem_cache for struct mem_cgroup allocations
+ * 2) memcg_pn_cachep - kmem_cache for struct mem_cgroup_per_node
+ *    allocations (one per NUMA node)
+ */
+void __init mem_cgroup_early_init(void)
+{
+ struct mem_cgroup *memcg;
+ unsigned int memcg_size;
+
+ memcg_size = struct_size(memcg, nodeinfo, nr_node_ids);
+ memcg_cachep = kmem_cache_create("mem_cgroup", memcg_size, 0,
+ SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
+
+ memcg_pn_cachep = KMEM_CACHE(mem_cgroup_per_node,
+ SLAB_PANIC | SLAB_HWCACHE_ALIGN);
+}
+
/*
* subsys_initcall() for memory controller.
*
@@ -5048,7 +5064,6 @@ __setup("cgroup.memory=", cgroup_memory);
static int __init mem_cgroup_init(void)
{
int cpu;
- unsigned int memcg_size;
/*
* Currently s32 type (can refer to struct batched_lruvec_stat) is
@@ -5065,13 +5080,6 @@ static int __init mem_cgroup_init(void)
INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
drain_local_stock);
- memcg_size = struct_size_t(struct mem_cgroup, nodeinfo, nr_node_ids);
- memcg_cachep = kmem_cache_create("mem_cgroup", memcg_size, 0,
- SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
-
- memcg_pn_cachep = KMEM_CACHE(mem_cgroup_per_node,
- SLAB_PANIC | SLAB_HWCACHE_ALIGN);
-
return 0;
}
subsys_initcall(mem_cgroup_init);
--
2.48.1