Message-Id: <20201206082318.11532-9-songmuchun@bytedance.com>
Date: Sun, 6 Dec 2020 16:23:05 +0800
From: Muchun Song <songmuchun@...edance.com>
To: gregkh@...uxfoundation.org, rafael@...nel.org, adobriyan@...il.com,
akpm@...ux-foundation.org, hannes@...xchg.org, mhocko@...nel.org,
vdavydov.dev@...il.com, hughd@...gle.com, will@...nel.org,
guro@...com, rppt@...nel.org, tglx@...utronix.de, esyr@...hat.com,
peterx@...hat.com, krisman@...labora.com, surenb@...gle.com,
avagin@...nvz.org, elver@...gle.com, rdunlap@...radead.org,
iamjoonsoo.kim@....com
Cc: linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org, cgroups@...r.kernel.org,
Muchun Song <songmuchun@...edance.com>
Subject: [PATCH v2 04/12] mm: memcontrol: convert NR_SHMEM_THPS account to pages
Currently the NR_SHMEM_THPS counter is accounted in units of huge pages,
so every reader has to multiply it by HPAGE_PMD_NR before reporting it.
Convert NR_SHMEM_THPS to account in units of base pages, consistent with
the other node stats: the update sites in shmem_add_to_page_cache(),
collapse_file(), split_huge_page_to_list() and unaccount_page_cache_page()
now add or subtract HPAGE_PMD_NR, and the readers in node_read_meminfo(),
meminfo_proc_show() and show_free_areas() print the value directly.

This also lets memcg drop the memory_stats_init() workaround: the
"shmem_thp" entry can use a constant PAGE_SIZE ratio instead of being
patched to HPAGE_PMD_SIZE at boot (HPAGE_PMD_SIZE is not a compile-time
constant on some architectures, e.g. powerpc), and the memory_stats
array can become const.
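For illustration only (not part of the patch): a minimal userspace C
sketch of the convention this conversion adopts. The writer side charges
HPAGE_PMD_NR base pages per shmem THP and the reader reports the counter
as-is, with no multiplication. The PAGE_SHIFT/HPAGE_PMD_NR values assume
4K base pages and 2M THPs, and a plain variable stands in for the real
per-node NR_SHMEM_THPS counter.

  /*
   * Illustrative sketch only, not kernel code: models the unit change
   * made by this patch. HPAGE_PMD_NR and K() are stand-ins for the
   * kernel definitions.
   */
  #include <stdio.h>

  #define PAGE_SHIFT   12                         /* assume 4K base pages */
  #define HPAGE_PMD_NR 512                        /* one 2M THP = 512 base pages */
  #define K(x)         ((x) << (PAGE_SHIFT - 10)) /* pages -> kB, as in meminfo */

  static unsigned long nr_shmem_thps;             /* NR_SHMEM_THPS stand-in */

  /* Writer side: charge a whole THP in base pages (was: +1 huge page). */
  static void shmem_thp_added(void)
  {
          nr_shmem_thps += HPAGE_PMD_NR;
  }

  /* Reader side: report the counter directly, no * HPAGE_PMD_NR needed. */
  static void print_shmem_huge_pages(void)
  {
          printf("ShmemHugePages: %8lu kB\n", K(nr_shmem_thps));
  }

  int main(void)
  {
          shmem_thp_added();
          shmem_thp_added();        /* two 2M shmem THPs in the page cache */
          print_shmem_huge_pages(); /* prints 4096 kB */
          return 0;
  }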
Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
 drivers/base/node.c |  3 +--
 fs/proc/meminfo.c   |  2 +-
 mm/filemap.c        |  2 +-
 mm/huge_memory.c    |  3 ++-
 mm/khugepaged.c     |  2 +-
 mm/memcontrol.c     | 26 ++------------------------
 mm/page_alloc.c     |  2 +-
 mm/shmem.c          |  3 ++-
 8 files changed, 11 insertions(+), 32 deletions(-)
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 2db28acdaa4f..3e1094717e40 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -462,8 +462,7 @@ static ssize_t node_read_meminfo(struct device *dev,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
,
nid, K(node_page_state(pgdat, NR_ANON_THPS)),
- nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
- HPAGE_PMD_NR),
+ nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
HPAGE_PMD_NR),
nid, K(node_page_state(pgdat, NR_FILE_THPS)),
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index f4157f26cbf5..b4d8a6ee822d 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -131,7 +131,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
show_val_kb(m, "AnonHugePages: ",
global_node_page_state(NR_ANON_THPS));
show_val_kb(m, "ShmemHugePages: ",
- global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR);
+ global_node_page_state(NR_SHMEM_THPS));
show_val_kb(m, "ShmemPmdMapped: ",
global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);
show_val_kb(m, "FileHugePages: ",
diff --git a/mm/filemap.c b/mm/filemap.c
index c4dcb1144883..5fdefbbc1bc2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -204,7 +204,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
if (PageSwapBacked(page)) {
__mod_lruvec_page_state(page, NR_SHMEM, -nr);
if (PageTransHuge(page))
- __dec_lruvec_page_state(page, NR_SHMEM_THPS);
+ __mod_lruvec_page_state(page, NR_SHMEM_THPS, -HPAGE_PMD_NR);
} else if (PageTransHuge(page)) {
__mod_lruvec_page_state(page, NR_FILE_THPS, -HPAGE_PMD_NR);
filemap_nr_thps_dec(mapping);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 37840bdeaad0..0e8541bd9f50 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2746,7 +2746,8 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
spin_unlock(&ds_queue->split_queue_lock);
if (mapping) {
if (PageSwapBacked(head))
- __dec_lruvec_page_state(head, NR_SHMEM_THPS);
+ __mod_lruvec_page_state(head, NR_SHMEM_THPS,
+ -HPAGE_PMD_NR);
else
__mod_lruvec_page_state(head, NR_FILE_THPS,
-HPAGE_PMD_NR);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 1e1ced2208d0..4fe79ccfc312 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1857,7 +1857,7 @@ static void collapse_file(struct mm_struct *mm,
}
if (is_shmem)
- __inc_lruvec_page_state(new_page, NR_SHMEM_THPS);
+ __mod_lruvec_page_state(new_page, NR_SHMEM_THPS, HPAGE_PMD_NR);
else {
__mod_lruvec_page_state(new_page, NR_FILE_THPS, HPAGE_PMD_NR);
filemap_nr_thps_inc(mapping);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c4557de2b211..6d4365d2fd1c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1497,7 +1497,7 @@ struct memory_stat {
unsigned int idx;
};
-static struct memory_stat memory_stats[] = {
+static const struct memory_stat memory_stats[] = {
{ "anon", PAGE_SIZE, NR_ANON_MAPPED },
{ "file", PAGE_SIZE, NR_FILE_PAGES },
{ "kernel_stack", 1024, NR_KERNEL_STACK_KB },
@@ -1508,14 +1508,9 @@ static struct memory_stat memory_stats[] = {
{ "file_dirty", PAGE_SIZE, NR_FILE_DIRTY },
{ "file_writeback", PAGE_SIZE, NR_WRITEBACK },
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- /*
- * The ratio will be initialized in memory_stats_init(). Because
- * on some architectures, the macro of HPAGE_PMD_SIZE is not
- * constant(e.g. powerpc).
- */
{ "anon_thp", PAGE_SIZE, NR_ANON_THPS },
{ "file_thp", PAGE_SIZE, NR_FILE_THPS },
- { "shmem_thp", 0, NR_SHMEM_THPS },
+ { "shmem_thp", PAGE_SIZE, NR_SHMEM_THPS },
#endif
{ "inactive_anon", PAGE_SIZE, NR_INACTIVE_ANON },
{ "active_anon", PAGE_SIZE, NR_ACTIVE_ANON },
@@ -1540,23 +1535,6 @@ static struct memory_stat memory_stats[] = {
{ "workingset_nodereclaim", 1, WORKINGSET_NODERECLAIM },
};
-static int __init memory_stats_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (memory_stats[i].idx == NR_SHMEM_THPS)
- memory_stats[i].ratio = HPAGE_PMD_SIZE;
-#endif
- VM_BUG_ON(!memory_stats[i].ratio);
- VM_BUG_ON(memory_stats[i].idx >= MEMCG_NR_STAT);
- }
-
- return 0;
-}
-pure_initcall(memory_stats_init);
-
static char *memory_stat_format(struct mem_cgroup *memcg)
{
struct seq_buf s;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f97ca98d361f..b6a79196e870 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5567,7 +5567,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
K(node_page_state(pgdat, NR_WRITEBACK)),
K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
+ K(node_page_state(pgdat, NR_SHMEM_THPS)),
K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
* HPAGE_PMD_NR),
K(node_page_state(pgdat, NR_ANON_THPS)),
diff --git a/mm/shmem.c b/mm/shmem.c
index 5da4f1a3e663..ea5d8c9ccb5b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -713,7 +713,8 @@ static int shmem_add_to_page_cache(struct page *page,
}
if (PageTransHuge(page)) {
count_vm_event(THP_FILE_ALLOC);
- __inc_lruvec_page_state(page, NR_SHMEM_THPS);
+ __mod_lruvec_page_state(page, NR_SHMEM_THPS,
+ HPAGE_PMD_NR);
}
mapping->nrpages += nr;
__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
--
2.11.0