Message-ID: <20240815050453.1298138-4-shakeel.butt@linux.dev>
Date: Wed, 14 Aug 2024 22:04:49 -0700
From: Shakeel Butt <shakeel.butt@...ux.dev>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Muchun Song <muchun.song@...ux.dev>,
"T . J . Mercier" <tjmercier@...gle.com>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Meta kernel team <kernel-team@...a.com>,
cgroups@...r.kernel.org
Subject: [PATCH 3/7] memcg: move mem_cgroup_charge_statistics to v1 code
There are no callers of mem_cgroup_charge_statistics() in the v2 code
base, so move it to the v1-only code and rename it to
memcg1_charge_statistics().
Signed-off-by: Shakeel Butt <shakeel.butt@...ux.dev>
---
mm/memcontrol-v1.c | 17 +++++++++++++++--
mm/memcontrol-v1.h | 3 ++-
mm/memcontrol.c | 19 +++----------------
3 files changed, 20 insertions(+), 19 deletions(-)
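
Note for reviewers: the logic of the moved helper is unchanged by this
patch. As a reference, below is a minimal, self-contained userspace
sketch of the same counting behaviour; the struct and function names
are illustrative only and do not exist in the kernel tree.

/* Illustrative userspace model of the v1 charge-statistics logic.
 * The helper counts a charge/uncharge as a single PGPGIN/PGPGOUT event
 * regardless of folio size, while nr_page_events accumulates the page
 * count that memcg1_check_events() later compares against its targets.
 */
#include <stdio.h>

struct model_events {
	unsigned long pgpgin;
	unsigned long pgpgout;
	unsigned long nr_page_events;
};

static void model_charge_statistics(struct model_events *ev, int nr_pages)
{
	/* pagein of a big page is one event, so the page size is ignored */
	if (nr_pages > 0) {
		ev->pgpgin++;
	} else {
		ev->pgpgout++;
		nr_pages = -nr_pages;	/* accumulate a positive page count */
	}
	ev->nr_page_events += nr_pages;
}

int main(void)
{
	struct model_events ev = { 0 };

	model_charge_statistics(&ev, 512);	/* charge e.g. a 2MB THP on 4K pages: one PGPGIN */
	model_charge_statistics(&ev, -1);	/* uncharge one page: one PGPGOUT */
	printf("pgpgin=%lu pgpgout=%lu nr_page_events=%lu\n",
	       ev.pgpgin, ev.pgpgout, ev.nr_page_events);
	return 0;
}
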
diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index 0ce1807ba468..73587e6417c5 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -853,9 +853,9 @@ static int mem_cgroup_move_account(struct folio *folio,
nid = folio_nid(folio);
local_irq_disable();
- mem_cgroup_charge_statistics(to, nr_pages);
+ memcg1_charge_statistics(to, nr_pages);
memcg1_check_events(to, nid);
- mem_cgroup_charge_statistics(from, -nr_pages);
+ memcg1_charge_statistics(from, -nr_pages);
memcg1_check_events(from, nid);
local_irq_enable();
out:
@@ -1439,6 +1439,19 @@ static void mem_cgroup_threshold(struct mem_cgroup *memcg)
}
}
+void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
+{
+ /* pagein of a big page is an event. So, ignore page size */
+ if (nr_pages > 0)
+ __count_memcg_events(memcg, PGPGIN, 1);
+ else {
+ __count_memcg_events(memcg, PGPGOUT, 1);
+ nr_pages = -nr_pages; /* for event */
+ }
+
+ __this_cpu_add(memcg->events_percpu->nr_page_events, nr_pages);
+}
+
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h
index fb7d439f19de..ef72d0b7c5c6 100644
--- a/mm/memcontrol-v1.h
+++ b/mm/memcontrol-v1.h
@@ -7,7 +7,6 @@
/* Cgroup v1 and v2 common declarations */
-void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
unsigned int nr_pages);
@@ -116,6 +115,7 @@ bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
void memcg1_oom_recover(struct mem_cgroup *memcg);
+void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
void memcg1_check_events(struct mem_cgroup *memcg, int nid);
void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
@@ -147,6 +147,7 @@ static inline bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked) {
static inline void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked) {}
static inline void memcg1_oom_recover(struct mem_cgroup *memcg) {}
+static inline void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages) {}
static inline void memcg1_check_events(struct mem_cgroup *memcg, int nid) {}
static inline void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) {}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7ea511119567..f8db9924d5dc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -848,19 +848,6 @@ unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
return READ_ONCE(memcg->vmstats->events_local[i]);
}
-void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
-{
- /* pagein of a big page is an event. So, ignore page size */
- if (nr_pages > 0)
- __count_memcg_events(memcg, PGPGIN, 1);
- else {
- __count_memcg_events(memcg, PGPGOUT, 1);
- nr_pages = -nr_pages; /* for event */
- }
-
- __this_cpu_add(memcg->events_percpu->nr_page_events, nr_pages);
-}
-
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
/*
@@ -2398,7 +2385,7 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
commit_charge(folio, memcg);
local_irq_disable();
- mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
+ memcg1_charge_statistics(memcg, folio_nr_pages(folio));
memcg1_check_events(memcg, folio_nid(folio));
local_irq_enable();
}
@@ -4775,7 +4762,7 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
commit_charge(new, memcg);
local_irq_save(flags);
- mem_cgroup_charge_statistics(memcg, nr_pages);
+ memcg1_charge_statistics(memcg, nr_pages);
memcg1_check_events(memcg, folio_nid(new));
local_irq_restore(flags);
}
@@ -5020,7 +5007,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
* only synchronisation we have for updating the per-CPU variables.
*/
memcg_stats_lock();
- mem_cgroup_charge_statistics(memcg, -nr_entries);
+ memcg1_charge_statistics(memcg, -nr_entries);
memcg_stats_unlock();
memcg1_check_events(memcg, folio_nid(folio));
--
2.43.5