Message-Id: <20191024202858.95342-7-almasrymina@google.com>
Date: Thu, 24 Oct 2019 13:28:56 -0700
From: Mina Almasry <almasrymina@...gle.com>
To: mike.kravetz@...cle.com
Cc: shuah@...nel.org, almasrymina@...gle.com, rientjes@...gle.com,
shakeelb@...gle.com, gthelen@...gle.com, akpm@...ux-foundation.org,
khalid.aziz@...cle.com, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-kselftest@...r.kernel.org,
cgroups@...r.kernel.org, aneesh.kumar@...ux.vnet.ibm.com,
mkoutny@...e.com
Subject: [PATCH v7 7/9] hugetlb_cgroup: support noreserve mappings

Support MAP_NORESERVE accounting as part of the new counter.

For each hugepage allocation, we check at allocation time whether there
is a reservation for this allocation. If there is, the allocation was
already charged at reservation time, and we do not account it again. If
there is no reservation for this allocation, we charge the appropriate
hugetlb_cgroup (see the illustrative sketch below).
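
Concretely, the check added to alloc_huge_page() below reduces to the
following condition. The helper and its name are hypothetical, shown
only for illustration; the patch open-codes the test:

	/* Illustration only: not part of this patch. alloc_huge_page()
	 * open-codes this condition at the charge, commit, and unwind
	 * sites.
	 */
	static bool charge_outside_reservation(struct vm_area_struct *vma,
					       long map_chg, int avoid_reserve)
	{
		/* No reservation backs this page if the reserve map needed
		 * a new page (map_chg), the caller asked to skip reserves
		 * (avoid_reserve), or the VMA has no reserve map at all
		 * (e.g. MAP_NORESERVE mappings).
		 */
		return map_chg || avoid_reserve || !vma_resv_map(vma);
	}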

The hugetlb_cgroup to uncharge for this allocation is stored in
page[3].private. We use the new APIs added in an earlier patch of this
series to set this pointer.
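
For reference, a minimal sketch of the lookup side under this layout.
The page[3].private slot matches the description above; page[2].private
for the non-reservation pointer is an assumption based on the
pre-existing hugetlb_cgroup storage, so treat the subpage indices as
illustrative:

	/* Sketch of the accessor shape added earlier in this series;
	 * the subpage indices here are assumptions, not taken verbatim
	 * from that patch.
	 */
	static inline struct hugetlb_cgroup *
	hugetlb_cgroup_from_page(struct page *page, bool reserved)
	{
		if (reserved)
			return (struct hugetlb_cgroup *)page[3].private;
		return (struct hugetlb_cgroup *)page[2].private;
	}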

---
 mm/hugetlb.c | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cef15e68626bd..7715018a0af22 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1253,6 +1253,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 				1 << PG_writeback);
 	}
 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page, false), page);
+	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page, true), page);
 	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
 	set_page_refcounted(page);
 	if (hstate_is_gigantic(h)) {
@@ -1364,6 +1365,9 @@ void free_huge_page(struct page *page)
 	clear_page_huge_active(page);
 	hugetlb_cgroup_uncharge_page(hstate_index(h), pages_per_huge_page(h),
 				     page, false);
+	hugetlb_cgroup_uncharge_page(hstate_index(h), pages_per_huge_page(h),
+				     page, true);
+
 	if (restore_reserve)
 		h->resv_huge_pages++;
 
@@ -1390,6 +1394,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
 	spin_lock(&hugetlb_lock);
 	set_hugetlb_cgroup(page, NULL, false);
+	set_hugetlb_cgroup(page, NULL, true);
 	h->nr_huge_pages++;
 	h->nr_huge_pages_node[nid]++;
 	spin_unlock(&hugetlb_lock);
@@ -2195,10 +2200,19 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		gbl_chg = 1;
 	}
 
+	/* If this allocation is not consuming a reservation, charge it now.
+	 */
+	if (map_chg || avoid_reserve || !vma_resv_map(vma)) {
+		ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h),
+						   &h_cg, true);
+		if (ret)
+			goto out_subpool_put;
+	}
+
 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg,
 					   false);
 	if (ret)
-		goto out_subpool_put;
+		goto out_uncharge_cgroup_reservation;
 
 	spin_lock(&hugetlb_lock);
 	/*
@@ -2222,6 +2236,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 	}
 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page,
 				     false);
+	if (!vma_resv_map(vma) || map_chg || avoid_reserve) {
+		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg,
+					     page, true);
+	}
+
 	spin_unlock(&hugetlb_lock);
 
 	set_page_private(page, (unsigned long)spool);
@@ -2247,6 +2266,10 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 out_uncharge_cgroup:
 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg,
 				       false);
+out_uncharge_cgroup_reservation:
+	if (map_chg || avoid_reserve || !vma_resv_map(vma))
+		hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h),
+					       h_cg, true);
 out_subpool_put:
 	if (map_chg || avoid_reserve)
 		hugepage_subpool_put_pages(spool, 1);
--
2.24.0.rc0.303.g954a862665-goog