Message-ID: <20251110232008.1352063-5-shakeel.butt@linux.dev>
Date: Mon, 10 Nov 2025 15:20:08 -0800
From: Shakeel Butt <shakeel.butt@...ux.dev>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Muchun Song <muchun.song@...ux.dev>,
Harry Yoo <harry.yoo@...cle.com>,
Qi Zheng <qi.zheng@...ux.dev>,
Vlastimil Babka <vbabka@...e.cz>,
linux-mm@...ck.org,
cgroups@...r.kernel.org,
linux-kernel@...r.kernel.org,
Meta kernel team <kernel-team@...a.com>
Subject: [PATCH 4/4] memcg: remove __lruvec_stat_mod_folio
__lruvec_stat_mod_folio() is already safe against irqs, so there is no
need for a separate interface (i.e. lruvec_stat_mod_folio()) that wraps
calls to it with irq disabling and re-enabling. Remove that wrapper,
rename __lruvec_stat_mod_folio() to lruvec_stat_mod_folio(), drop the
now-redundant __lruvec_stat_add_folio()/__lruvec_stat_sub_folio()
helpers, and convert all callers accordingly.
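For illustration, a minimal caller-side sketch (account_file_folio()
below is a hypothetical helper, not part of this patch): after the
rename a caller updates a stat the same way whether or not it already
has irqs disabled, with no explicit local_irq_save()/local_irq_restore()
around the update.

	/* Hypothetical caller, for illustration only. */
	static void account_file_folio(struct folio *folio, int nr)
	{
		/* Safe with or without irqs disabled at this point. */
		lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
	}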
Signed-off-by: Shakeel Butt <shakeel.butt@...ux.dev>
---
 include/linux/vmstat.h | 30 +-----------------------------
 mm/filemap.c           | 20 ++++++++++----------
 mm/huge_memory.c       |  4 ++--
 mm/khugepaged.c        |  8 ++++----
 mm/memcontrol.c        |  4 ++--
 mm/page-writeback.c    |  2 +-
 mm/rmap.c              |  4 ++--
 mm/shmem.c             |  6 +++---
 8 files changed, 25 insertions(+), 53 deletions(-)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 4eb7753e6e5c..3398a345bda8 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -523,19 +523,9 @@ static inline const char *vm_event_name(enum vm_event_item item)
void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val);
-void __lruvec_stat_mod_folio(struct folio *folio,
+void lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val);
-static inline void lruvec_stat_mod_folio(struct folio *folio,
- enum node_stat_item idx, int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __lruvec_stat_mod_folio(folio, idx, val);
- local_irq_restore(flags);
-}
-
static inline void mod_lruvec_page_state(struct page *page,
enum node_stat_item idx, int val)
{
@@ -550,12 +540,6 @@ static inline void mod_lruvec_state(struct lruvec *lruvec,
mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}
-static inline void __lruvec_stat_mod_folio(struct folio *folio,
- enum node_stat_item idx, int val)
-{
- mod_node_page_state(folio_pgdat(folio), idx, val);
-}
-
static inline void lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val)
{
@@ -570,18 +554,6 @@ static inline void mod_lruvec_page_state(struct page *page,
#endif /* CONFIG_MEMCG */
-static inline void __lruvec_stat_add_folio(struct folio *folio,
- enum node_stat_item idx)
-{
- __lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
-}
-
-static inline void __lruvec_stat_sub_folio(struct folio *folio,
- enum node_stat_item idx)
-{
- __lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
-}
-
static inline void lruvec_stat_add_folio(struct folio *folio,
enum node_stat_item idx)
{
diff --git a/mm/filemap.c b/mm/filemap.c
index 63eb163af99c..9a52fb3ba093 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -182,13 +182,13 @@ static void filemap_unaccount_folio(struct address_space *mapping,
nr = folio_nr_pages(folio);
- __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
+ lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
if (folio_test_swapbacked(folio)) {
- __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
+ lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
if (folio_test_pmd_mappable(folio))
- __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
+ lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
} else if (folio_test_pmd_mappable(folio)) {
- __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
+ lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
filemap_nr_thps_dec(mapping);
}
if (test_bit(AS_KERNEL_FILE, &folio->mapping->flags))
@@ -831,13 +831,13 @@ void replace_page_cache_folio(struct folio *old, struct folio *new)
old->mapping = NULL;
/* hugetlb pages do not participate in page cache accounting. */
if (!folio_test_hugetlb(old))
- __lruvec_stat_sub_folio(old, NR_FILE_PAGES);
+ lruvec_stat_sub_folio(old, NR_FILE_PAGES);
if (!folio_test_hugetlb(new))
- __lruvec_stat_add_folio(new, NR_FILE_PAGES);
+ lruvec_stat_add_folio(new, NR_FILE_PAGES);
if (folio_test_swapbacked(old))
- __lruvec_stat_sub_folio(old, NR_SHMEM);
+ lruvec_stat_sub_folio(old, NR_SHMEM);
if (folio_test_swapbacked(new))
- __lruvec_stat_add_folio(new, NR_SHMEM);
+ lruvec_stat_add_folio(new, NR_SHMEM);
xas_unlock_irq(&xas);
if (free_folio)
free_folio(old);
@@ -920,9 +920,9 @@ noinline int __filemap_add_folio(struct address_space *mapping,
/* hugetlb pages do not participate in page cache accounting */
if (!huge) {
- __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+ lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
if (folio_test_pmd_mappable(folio))
- __lruvec_stat_mod_folio(folio,
+ lruvec_stat_mod_folio(folio,
NR_FILE_THPS, nr);
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 949250932bb4..943099eae8d5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3866,10 +3866,10 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
if (folio_test_pmd_mappable(folio) &&
new_order < HPAGE_PMD_ORDER) {
if (folio_test_swapbacked(folio)) {
- __lruvec_stat_mod_folio(folio,
+ lruvec_stat_mod_folio(folio,
NR_SHMEM_THPS, -nr);
} else {
- __lruvec_stat_mod_folio(folio,
+ lruvec_stat_mod_folio(folio,
NR_FILE_THPS, -nr);
filemap_nr_thps_dec(mapping);
}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 1a08673b0d8b..2a460664a67d 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2174,14 +2174,14 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
}
if (is_shmem)
- __lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
+ lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
else
- __lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
+ lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
if (nr_none) {
- __lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
+ lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
/* nr_none is always 0 for non-shmem. */
- __lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
+ lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
}
/*
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c31074e5852b..7f074d72dabc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -777,7 +777,7 @@ void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
mod_memcg_lruvec_state(lruvec, idx, val);
}
-void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
+void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
int val)
{
struct mem_cgroup *memcg;
@@ -797,7 +797,7 @@ void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
mod_lruvec_state(lruvec, idx, val);
rcu_read_unlock();
}
-EXPORT_SYMBOL(__lruvec_stat_mod_folio);
+EXPORT_SYMBOL(lruvec_stat_mod_folio);
void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a124ab6a205d..ccdeb0e84d39 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2652,7 +2652,7 @@ static void folio_account_dirtied(struct folio *folio,
inode_attach_wb(inode, folio);
wb = inode_to_wb(inode);
- __lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
+ lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
__zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
__node_stat_mod_folio(folio, NR_DIRTIED, nr);
wb_stat_mod(wb, WB_RECLAIMABLE, nr);
diff --git a/mm/rmap.c b/mm/rmap.c
index 60c3cd70b6ea..1b3a3c7b0aeb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1212,12 +1212,12 @@ static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
if (nr) {
idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
- __lruvec_stat_mod_folio(folio, idx, nr);
+ lruvec_stat_mod_folio(folio, idx, nr);
}
if (nr_pmdmapped) {
if (folio_test_anon(folio)) {
idx = NR_ANON_THPS;
- __lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
+ lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
} else {
/* NR_*_PMDMAPPED are not maintained per-memcg */
idx = folio_test_swapbacked(folio) ?
diff --git a/mm/shmem.c b/mm/shmem.c
index c3ed2dcd17f8..4fba8a597256 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -882,9 +882,9 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
static void shmem_update_stats(struct folio *folio, int nr_pages)
{
if (folio_test_pmd_mappable(folio))
- __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
- __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
- __lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
+ lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
+ lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
+ lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
}
/*
--
2.47.3