Message-ID: <20241017160438.3893293-2-joshua.hahnjy@gmail.com>
Date: Thu, 17 Oct 2024 09:04:38 -0700
From: Joshua Hahn <joshua.hahnjy@...il.com>
To: hannes@...xchg.org
Cc: nphamcs@...il.com,
mhocko@...nel.org,
roman.gushchin@...ux.dev,
shakeel.butt@...ux.dev,
muchun.song@...ux.dev,
akpm@...ux-foundation.org,
cgroups@...r.kernel.org,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
lnyng@...a.com
Subject: [PATCH 1/1] memcg/hugetlb: Add hugeTLB counters to memory controller

Add hugeTLB usage as a metric in memcg_stat_item and update it in the
hugeTLB alloc and free paths, after the memcg (un)charge has been
committed. Like the rest of the memcg stats, the updates are batched
and flushed periodically, so the extra overhead on the (already
infrequent) hugeTLB allocations and frees is minimal.
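
Conceptually, the batching behaves like the userspace sketch below:
each update lands in a thread-local buffer and is only folded into the
shared counter once a threshold is crossed, so readers may briefly see
a slightly stale value. This is a simplified model with made-up names
(mod_stat_batched, FLUSH_THRESHOLD, shared_hugetlb_pages), not the
kernel's memcg stat code:

/* Illustrative userspace model of threshold-based stat batching. */
#include <stdio.h>
#include <stdatomic.h>

#define FLUSH_THRESHOLD 1024			/* arbitrary batching threshold */

static _Atomic long shared_hugetlb_pages;	/* counter that readers see */
static _Thread_local long pending;		/* batched, not yet flushed */

static void mod_stat_batched(long delta)
{
	pending += delta;
	if (pending >= FLUSH_THRESHOLD || pending <= -FLUSH_THRESHOLD) {
		atomic_fetch_add(&shared_hugetlb_pages, pending);
		pending = 0;
	}
}

int main(void)
{
	mod_stat_batched(512);		/* alloc of a 2 MiB folio: 512 pages */
	mod_stat_batched(-512);		/* matching free */
	atomic_fetch_add(&shared_hugetlb_pages, pending);	/* explicit flush */
	pending = 0;
	printf("hugeTLB pages: %ld\n", atomic_load(&shared_hugetlb_pages));
	return 0;
}

With the threshold above, neither the alloc nor the free publishes to
the shared counter on its own; the net value only becomes visible at
the explicit flush, which keeps the cost of each individual hugeTLB
alloc / free small.
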
Signed-off-by: Joshua Hahn <joshua.hahnjy@...il.com>
---
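As a rough sketch of how the new counter could be read from userspace
once this is applied, the snippet below scans memory.stat for the
"hugeTLB" key added to memory_stats[] by this patch. The cgroup path
is an assumption (cgroup v2 mounted at /sys/fs/cgroup); point it at
whichever cgroup's memory.stat is of interest.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* assumed mount point; substitute the cgroup you care about */
	FILE *f = fopen("/sys/fs/cgroup/memory.stat", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "hugeTLB ", 8))	/* key added by this patch */
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
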
 include/linux/memcontrol.h | 3 +++
 mm/hugetlb.c               | 5 +++++
 mm/memcontrol.c            | 6 ++++++
 3 files changed, 14 insertions(+)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 34d2da05f2f1..66e925ae499a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -39,6 +39,9 @@ enum memcg_stat_item {
 	MEMCG_KMEM,
 	MEMCG_ZSWAP_B,
 	MEMCG_ZSWAPPED,
+#ifdef CONFIG_HUGETLB_PAGE
+	MEMCG_HUGETLB,
+#endif
 	MEMCG_NR_STAT,
 };
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 190fa05635f4..ca7151096712 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1887,6 +1887,7 @@ void free_huge_folio(struct folio *folio)
 	struct hstate *h = folio_hstate(folio);
 	int nid = folio_nid(folio);
 	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
+	struct mem_cgroup *memcg = get_mem_cgroup_from_current();
 	bool restore_reserve;
 	unsigned long flags;
 
@@ -1926,6 +1927,8 @@ void free_huge_folio(struct folio *folio)
 	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
 					  pages_per_huge_page(h), folio);
 	mem_cgroup_uncharge(folio);
+	mod_memcg_state(memcg, MEMCG_HUGETLB, -pages_per_huge_page(h));
+	mem_cgroup_put(memcg);
 	if (restore_reserve)
 		h->resv_huge_pages++;
@@ -3093,6 +3096,8 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 	if (!memcg_charge_ret)
 		mem_cgroup_commit_charge(folio, memcg);
+
+	mod_memcg_state(memcg, MEMCG_HUGETLB, nr_pages);
 	mem_cgroup_put(memcg);
 	return folio;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7845c64a2c57..4180ee876adb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -320,6 +320,9 @@ static const unsigned int memcg_stat_items[] = {
 	MEMCG_KMEM,
 	MEMCG_ZSWAP_B,
 	MEMCG_ZSWAPPED,
+#ifdef CONFIG_HUGETLB_PAGE
+	MEMCG_HUGETLB,
+#endif
 };
 
 #define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
@@ -1324,6 +1327,9 @@ static const struct memory_stat memory_stats[] = {
 	{ "sock",			MEMCG_SOCK			},
 	{ "vmalloc",			MEMCG_VMALLOC			},
 	{ "shmem",			NR_SHMEM			},
+#ifdef CONFIG_HUGETLB_PAGE
+	{ "hugeTLB",			MEMCG_HUGETLB			},
+#endif
 #ifdef CONFIG_ZSWAP
 	{ "zswap",			MEMCG_ZSWAP_B			},
 	{ "zswapped",			MEMCG_ZSWAPPED			},
--
2.43.5