Message-Id: <20240405102704.77559-3-21cnbao@gmail.com>
Date: Fri, 5 Apr 2024 23:27:04 +1300
From: Barry Song <21cnbao@...il.com>
To: david@...hat.com,
akpm@...ux-foundation.org,
linux-mm@...ck.org,
ryan.roberts@....com
Cc: cerasuolodomenico@...il.com,
chrisl@...nel.org,
kasong@...cent.com,
peterx@...hat.com,
surenb@...gle.com,
v-songbaohua@...o.com,
willy@...radead.org,
yosryahmed@...gle.com,
yuzhao@...gle.com,
linux-kernel@...r.kernel.org
Subject: [PATCH v4 2/2] mm: add per-order mTHP anon_swpout and anon_swpout_fallback counters
From: Barry Song <v-songbaohua@...o.com>
This helps to show how fragmented the swapfile is: the two counters tell
us what proportion of large folios we managed to swap out without
splitting them. So far, non-split swapout is only supported for anonymous
memory, with the possibility of extending it to shmem in the future, so
the counter names carry an "anon" prefix.
Signed-off-by: Barry Song <v-songbaohua@...o.com>
---
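Not part of the patch, just an illustrative userspace sketch of how these
counters might be consumed. It assumes the per-order stats sysfs layout
introduced by patch 1/2 of this series
(/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/stats/); the size
string, path buffer and error handling below are only for illustration.

/*
 * Illustrative only -- not part of this patch.  Assumes the per-order
 * counters are exposed under
 * /sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/stats/
 * as added by patch 1/2 of this series.
 */
#include <stdio.h>

static long read_stat(const char *size_kb, const char *name)
{
	char path[256];
	long val;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/mm/transparent_hugepage/hugepages-%skB/stats/%s",
		 size_kb, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	const char *size_kb = "64";	/* e.g. 64kB mTHP with 4kB base pages */
	long swpout = read_stat(size_kb, "anon_swpout");
	long fallback = read_stat(size_kb, "anon_swpout_fallback");

	if (swpout < 0 || fallback < 0)
		return 1;
	printf("%skB anon_swpout=%ld anon_swpout_fallback=%ld (%.1f%% split)\n",
	       size_kb, swpout, fallback,
	       swpout + fallback ? 100.0 * fallback / (swpout + fallback) : 0.0);
	return 0;
}

Here anon_swpout_fallback / (anon_swpout + anon_swpout_fallback) is the
proportion of large folios that had to be split before swapout, i.e. the
fragmentation proportion the commit message refers to.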
 include/linux/huge_mm.h | 2 ++
 mm/huge_memory.c        | 4 ++++
 mm/page_io.c            | 6 +++++-
 mm/vmscan.c             | 3 +++
 4 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index c5d33017a4dd..1d893a358df6 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -267,6 +267,8 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
 enum mthp_stat_item {
 	MTHP_STAT_ANON_ALLOC,
 	MTHP_STAT_ANON_ALLOC_FALLBACK,
+	MTHP_STAT_ANON_SWPOUT,
+	MTHP_STAT_ANON_SWPOUT_FALLBACK,
 	__MTHP_STAT_COUNT
 };
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5b875f0fc923..28113f8fdf18 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -554,10 +554,14 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
 
 DEFINE_MTHP_STAT_ATTR(anon_alloc, MTHP_STAT_ANON_ALLOC);
 DEFINE_MTHP_STAT_ATTR(anon_alloc_fallback, MTHP_STAT_ANON_ALLOC_FALLBACK);
+DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT);
+DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK);
 
 static struct attribute *stats_attrs[] = {
 	&anon_alloc_attr.attr,
 	&anon_alloc_fallback_attr.attr,
+	&anon_swpout_attr.attr,
+	&anon_swpout_fallback_attr.attr,
 	NULL,
 };
 
diff --git a/mm/page_io.c b/mm/page_io.c
index a9a7c236aecc..7669452e8b4d 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -212,13 +212,17 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 
 static inline void count_swpout_vm_event(struct folio *folio)
 {
+	long nr_pages = folio_nr_pages(folio);
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	if (unlikely(folio_test_pmd_mappable(folio))) {
 		count_memcg_folio_events(folio, THP_SWPOUT, 1);
 		count_vm_event(THP_SWPOUT);
 	}
+	if (nr_pages > 0)
+		count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_SWPOUT);
 #endif
-	count_vm_events(PSWPOUT, folio_nr_pages(folio));
+	count_vm_events(PSWPOUT, nr_pages);
 }
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ffc4553c8615..b30e6294f82a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1247,6 +1247,9 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 						count_vm_event(
 							THP_SWPOUT_FALLBACK);
 					}
+					if (nr_pages > 0)
+						count_mthp_stat(get_order(nr_pages * PAGE_SIZE),
+							MTHP_STAT_ANON_SWPOUT_FALLBACK);
 #endif
 					if (!add_to_swap(folio))
 						goto activate_locked_split;
--
2.34.1