[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20251211013019.2080004-3-chenridong@huaweicloud.com>
Date: Thu, 11 Dec 2025 01:30:19 +0000
From: Chen Ridong <chenridong@...weicloud.com>
To: hannes@...xchg.org,
mhocko@...nel.org,
roman.gushchin@...ux.dev,
shakeel.butt@...ux.dev,
muchun.song@...ux.dev,
akpm@...ux-foundation.org,
axelrasmussen@...gle.com,
yuanchu@...gle.com,
weixugc@...gle.com,
david@...nel.org,
zhengqi.arch@...edance.com,
lorenzo.stoakes@...cle.com
Cc: cgroups@...r.kernel.org,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
lujialin4@...wei.com,
chenridong@...weicloud.com
Subject: [PATCH -next v3 2/2] memcg: remove mem_cgroup_size()
From: Chen Ridong <chenridong@...wei.com>
The mem_cgroup_size helper is used only in apply_proportional_protection
to read the current memory usage. Its semantics are unclear and
inconsistent with other sites, which directly call page_counter_read for
the same purpose.
Remove this helper and instead return the memory usage through
mem_cgroup_protection for clarity. Additionally, rename the local variable
'cgroup_size' to 'usage' to better reflect its meaning.
No functional changes intended.
Signed-off-by: Chen Ridong <chenridong@...wei.com>
---
include/linux/memcontrol.h | 18 +++++++-----------
mm/memcontrol.c | 5 -----
mm/vmscan.c | 9 ++++-----
3 files changed, 11 insertions(+), 21 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6a48398a1f4e..603252e3169c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -557,13 +557,15 @@ static inline bool mem_cgroup_disabled(void)
static inline void mem_cgroup_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg,
unsigned long *min,
- unsigned long *low)
+ unsigned long *low,
+ unsigned long *usage)
{
- *min = *low = 0;
+ *min = *low = *usage = 0;
if (mem_cgroup_disabled())
return;
+ *usage = page_counter_read(&memcg->memory);
/*
* There is no reclaim protection applied to a targeted reclaim.
* We are special casing this specific case here because
@@ -919,8 +921,6 @@ static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
-
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
struct task_struct *p);
@@ -1102,9 +1102,10 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
static inline void mem_cgroup_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg,
unsigned long *min,
- unsigned long *low)
+ unsigned long *low,
+ unsigned long *usage)
{
- *min = *low = 0;
+ *min = *low = *usage = 0;
}
static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
@@ -1328,11 +1329,6 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
return 0;
}
-static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
-{
- return 0;
-}
-
static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index dbe7d8f93072..659ce171b1b3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1621,11 +1621,6 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
return max;
}
-unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
-{
- return page_counter_read(&memcg->memory);
-}
-
void __memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event, bool allow_spinning)
{
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 670fe9fae5ba..9a6ee80275fc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2451,9 +2451,9 @@ static inline void calculate_pressure_balance(struct scan_control *sc,
static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
struct scan_control *sc, unsigned long scan)
{
- unsigned long min, low;
+ unsigned long min, low, usage;
- mem_cgroup_protection(sc->target_mem_cgroup, memcg, &min, &low);
+ mem_cgroup_protection(sc->target_mem_cgroup, memcg, &min, &low, &usage);
if (min || low) {
/*
@@ -2485,7 +2485,6 @@ static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
* again by how much of the total memory used is under
* hard protection.
*/
- unsigned long cgroup_size = mem_cgroup_size(memcg);
unsigned long protection;
/* memory.low scaling, make sure we retry before OOM */
@@ -2497,9 +2496,9 @@ static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
}
/* Avoid TOCTOU with earlier protection check */
- cgroup_size = max(cgroup_size, protection);
+ usage = max(usage, protection);
- scan -= scan * protection / (cgroup_size + 1);
+ scan -= scan * protection / (usage + 1);
/*
* Minimally target SWAP_CLUSTER_MAX pages to keep
--
2.34.1
Powered by blists - more mailing lists