Message-Id: <20240824010441.21308-3-21cnbao@gmail.com>
Date: Sat, 24 Aug 2024 13:04:41 +1200
From: Barry Song <21cnbao@...il.com>
To: akpm@...ux-foundation.org,
linux-mm@...ck.org
Cc: baolin.wang@...ux.alibaba.com,
chrisl@...nel.org,
david@...hat.com,
hanchuanhua@...o.com,
ioworker0@...il.com,
kaleshsingh@...gle.com,
kasong@...cent.com,
linux-kernel@...r.kernel.org,
ryan.roberts@....com,
usamaarif642@...il.com,
v-songbaohua@...o.com,
yuanshuai@...o.com,
ziy@...dia.com
Subject: [PATCH v4 2/2] mm: count the number of partially mapped anonymous THPs per size
From: Barry Song <v-songbaohua@...o.com>

When a THP is added to the deferred_list because it is partially
mapped, the subpages that are no longer mapped go unused, wasting
memory and potentially increasing memory reclamation pressure.

Detailing exactly how the unmapping occurred is quite difficult and
not that useful, so we adopt a simple approach: each time a THP
enters the deferred_list we increment the count by 1, and whenever
it leaves the list for any reason we decrement the count by 1.

Signed-off-by: Barry Song <v-songbaohua@...o.com>
Acked-by: David Hildenbrand <david@...hat.com>
---
Documentation/admin-guide/mm/transhuge.rst | 7 +++++++
include/linux/huge_mm.h | 1 +
mm/huge_memory.c | 6 ++++++
3 files changed, 14 insertions(+)
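
A minimal userspace sketch (not part of this patch) of how the new
counter can be observed. It assumes a 4 KiB base page size, a 2 MiB
PMD-sized THP (hence the hugepages-2048kB stats directory), and that
anonymous THP is enabled for that size; whether a THP is actually
allocated here, and therefore whether the counter moves, depends on
configuration and memory conditions:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define THP_SIZE	(2UL << 20)	/* assumed PMD-sized THP: 2 MiB */
#define STAT_PATH	"/sys/kernel/mm/transparent_hugepage/" \
			"hugepages-2048kB/stats/nr_anon_partially_mapped"

static long read_stat(void)
{
	long val = -1;
	FILE *f = fopen(STAT_PATH, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	/* Over-allocate so we can pick a THP-aligned start address. */
	char *raw = mmap(NULL, 2 * THP_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *buf;

	if (raw == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	buf = (char *)(((unsigned long)raw + THP_SIZE - 1) & ~(THP_SIZE - 1));

	/* Best effort: ask for a THP, then fault the whole region in. */
	madvise(buf, THP_SIZE, MADV_HUGEPAGE);
	memset(buf, 1, THP_SIZE);

	printf("before partial unmap: %ld\n", read_stat());

	/* Unmap a single base page: the THP becomes partially mapped. */
	munmap(buf, getpagesize());

	printf("after partial unmap:  %ld\n", read_stat());
	return 0;
}

If a THP was used for the region, the partial munmap() should queue
the folio on the deferred_list, so the second read is expected to be
one higher than the first until the shrinker splits or frees it.
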
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index b78f2148b242..6630f2ed14ee 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -556,6 +556,13 @@ nr_anon
These huge pages could be entirely mapped or have partially
unmapped/unused subpages.
+nr_anon_partially_mapped
+ the number of anonymous THP which are likely partially mapped, possibly
+ wasting memory, and have been queued for deferred memory reclamation.
+ Note that in some corner cases (e.g., failed migration), we might detect
+ an anonymous THP as "partially mapped" and count it here, even though it
+ is not actually partially mapped anymore.
+
As the system ages, allocating huge pages may be expensive as the
system uses memory compaction to copy data around memory to free a
huge page for use. There are some counters in ``/proc/vmstat`` to help
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 2ee2971e4e10..4902e2f7e896 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -127,6 +127,7 @@ enum mthp_stat_item {
MTHP_STAT_SPLIT_FAILED,
MTHP_STAT_SPLIT_DEFERRED,
MTHP_STAT_NR_ANON,
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
__MTHP_STAT_COUNT
};
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 26ad75fcda62..a81eab98d6b8 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -598,6 +598,7 @@ DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
+DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);
static struct attribute *anon_stats_attrs[] = {
&anon_fault_alloc_attr.attr,
@@ -611,6 +612,7 @@ static struct attribute *anon_stats_attrs[] = {
&split_failed_attr.attr,
&split_deferred_attr.attr,
&nr_anon_attr.attr,
+ &nr_anon_partially_mapped_attr.attr,
NULL,
};
@@ -3457,6 +3459,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
if (folio_order(folio) > 1 &&
!list_empty(&folio->_deferred_list)) {
ds_queue->split_queue_len--;
+ mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
/*
* Reinitialize page_deferred_list after removing the
* page from the split_queue, otherwise a subsequent
@@ -3523,6 +3526,7 @@ void __folio_undo_large_rmappable(struct folio *folio)
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
if (!list_empty(&folio->_deferred_list)) {
ds_queue->split_queue_len--;
+ mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
list_del_init(&folio->_deferred_list);
}
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
@@ -3564,6 +3568,7 @@ void deferred_split_folio(struct folio *folio)
if (folio_test_pmd_mappable(folio))
count_vm_event(THP_DEFERRED_SPLIT_PAGE);
count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
+ mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
ds_queue->split_queue_len++;
#ifdef CONFIG_MEMCG
@@ -3611,6 +3616,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
list_move(&folio->_deferred_list, &list);
} else {
/* We lost race with folio_put() */
+ mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
list_del_init(&folio->_deferred_list);
ds_queue->split_queue_len--;
}
--
2.39.3 (Apple Git-146)