Message-ID: <20250514050813.2526843-4-shakeel.butt@linux.dev>
Date: Tue, 13 May 2025 22:08:09 -0700
From: Shakeel Butt <shakeel.butt@...ux.dev>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Muchun Song <muchun.song@...ux.dev>,
Vlastimil Babka <vbabka@...e.cz>,
Alexei Starovoitov <ast@...nel.org>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Harry Yoo <harry.yoo@...cle.com>,
Yosry Ahmed <yosry.ahmed@...ux.dev>,
bpf@...r.kernel.org,
linux-mm@...ck.org,
cgroups@...r.kernel.org,
linux-kernel@...r.kernel.org,
Meta kernel team <kernel-team@...a.com>
Subject: [PATCH 3/7] memcg: make mod_memcg_state re-entrant safe against irqs
Let's make mod_memcg_state re-entrant safe against irqs. The only thing
needed is to convert the usage of __this_cpu_add() to this_cpu_add().
In addition, with re-entrant safety, there is no need to disable irqs.
mod_memcg_state() is not safe against nmi, so let's add a warning if
someone tries to call it in nmi context.
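
For readers less familiar with the percpu helpers, here is an illustrative
before/after sketch of the pattern this patch changes. It is not part of the
patch itself: the struct access mirrors the diff below, but the sketch inlines
the old __mod_memcg_state() body, skips the memcg_stats_index()/get_cpu()
details, and the exact placement of the nmi check is an assumption.

	/* Before: __this_cpu_add() is not re-entrant against irqs, so the
	 * inline wrapper had to disable interrupts around the update.
	 */
	static inline void mod_memcg_state_old(struct mem_cgroup *memcg,
					       enum memcg_stat_item idx, int val)
	{
		unsigned long flags;

		local_irq_save(flags);
		__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
		local_irq_restore(flags);
	}

	/* After: this_cpu_add() is safe against irq re-entrancy on its own,
	 * so the explicit irq disabling can be dropped.  It is still not
	 * nmi safe, hence the warning mentioned above (the exact form of
	 * the check here is assumed, not taken from the diff).
	 */
	static void mod_memcg_state_new(struct mem_cgroup *memcg,
					enum memcg_stat_item idx, int val)
	{
		WARN_ON_ONCE(in_nmi());
		this_cpu_add(memcg->vmstats_percpu->state[idx], val);
	}
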
Signed-off-by: Shakeel Butt <shakeel.butt@...ux.dev>
Acked-by: Vlastimil Babka <vbabka@...e.cz>
---
include/linux/memcontrol.h | 20 ++------------------
mm/memcontrol.c | 8 ++++----
2 files changed, 6 insertions(+), 22 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 308c01bf98f5..38a5d48400bf 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -905,19 +905,9 @@ struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
-void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
- int val);
-
/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx, int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __mod_memcg_state(memcg, idx, val);
- local_irq_restore(flags);
-}
+void mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx, int val);
static inline void mod_memcg_page_state(struct page *page,
enum memcg_stat_item idx, int val)
@@ -1384,12 +1374,6 @@ static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}
-static inline void __mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx,
- int nr)
-{
-}
-
static inline void mod_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx,
int nr)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8c8e0e1acd71..75616cd89aa1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -682,12 +682,12 @@ static int memcg_state_val_in_pages(int idx, int val)
}
/**
- * __mod_memcg_state - update cgroup memory statistics
+ * mod_memcg_state - update cgroup memory statistics
* @memcg: the memory cgroup
* @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
* @val: delta to add to the counter, can be negative
*/
-void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
+void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
int val)
{
int i = memcg_stats_index(idx);
@@ -701,7 +701,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
cpu = get_cpu();
- __this_cpu_add(memcg->vmstats_percpu->state[i], val);
+ this_cpu_add(memcg->vmstats_percpu->state[i], val);
val = memcg_state_val_in_pages(idx, val);
memcg_rstat_updated(memcg, val, cpu);
trace_mod_memcg_state(memcg, idx, val);
@@ -2945,7 +2945,7 @@ static void drain_obj_stock(struct obj_stock_pcp *stock)
memcg = get_mem_cgroup_from_objcg(old);
- __mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
+ mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
memcg1_account_kmem(memcg, -nr_pages);
if (!mem_cgroup_is_root(memcg))
memcg_uncharge(memcg, nr_pages);
--
2.47.1