Message-ID: <alpine.DEB.2.21.2001131646490.164268@chino.kir.corp.google.com>
Date: Mon, 13 Jan 2020 16:48:35 -0800 (PST)
From: David Rientjes <rientjes@...gle.com>
To: Mina Almasry <almasrymina@...gle.com>
cc: mike.kravetz@...cle.com, shuah@...nel.org, shakeelb@...gle.com,
gthelen@...gle.com, akpm@...ux-foundation.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-kselftest@...r.kernel.org, cgroups@...r.kernel.org,
aneesh.kumar@...ux.vnet.ibm.com, mkoutny@...e.com
Subject: Re: [PATCH v9 6/8] hugetlb_cgroup: support noreserve mappings
On Tue, 17 Dec 2019, Mina Almasry wrote:
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 39eb2fa53a420..3e94f5c2d7cd4 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1367,6 +1367,9 @@ void free_huge_page(struct page *page)
> clear_page_huge_active(page);
> hugetlb_cgroup_uncharge_page(hstate_index(h), pages_per_huge_page(h),
> page, false);
> + hugetlb_cgroup_uncharge_page(hstate_index(h), pages_per_huge_page(h),
> + page, true);
> +
> if (restore_reserve)
> h->resv_huge_pages++;
>
> @@ -2189,10 +2192,19 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
> gbl_chg = 1;
> }
>
> + /* If this allocation is not consuming a reservation, charge it now.
> + */
> + if (map_chg || avoid_reserve || !vma_resv_map(vma)) {
These conditions are checked three times in this function; maybe it would be
better to set a bool on the stack and use it throughout the function to
guarantee it remains consistent?
bool deferred_reserve = !vma_resv_map(vma) || map_chg || avoid_reserve;
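For illustration, with such a bool the hunks above might read roughly like
this (only a sketch against the quoted code, and the deferred_reserve name
is just a suggestion):

	/* Compute once whether this allocation bypasses the reservation. */
	bool deferred_reserve = !vma_resv_map(vma) || map_chg || avoid_reserve;
	...
	/* If this allocation is not consuming a reservation, charge it now. */
	if (deferred_reserve) {
		ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h),
						   &h_cg, true);
		if (ret)
			goto out_subpool_put;
	}
	...
	/* Same condition when committing the charge to the page. */
	if (deferred_reserve)
		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
					     h_cg, page, true);
	...
out_uncharge_cgroup_reservation:
	/* And again on the error path. */
	if (deferred_reserve)
		hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h),
					       h_cg, true);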
> + ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h),
> + &h_cg, true);
> + if (ret)
> + goto out_subpool_put;
> + }
> +
> ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg,
> false);
> if (ret)
> - goto out_subpool_put;
> + goto out_uncharge_cgroup_reservation;
>
> spin_lock(&hugetlb_lock);
> /*
> @@ -2216,6 +2228,14 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
> }
> hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page,
> false);
> + /* If allocation is not consuming a reservation, also store the
> + * hugetlb_cgroup pointer on the page.
> + */
> + if (!vma_resv_map(vma) || map_chg || avoid_reserve) {
> + hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg,
> + page, true);
> + }
> +
> spin_unlock(&hugetlb_lock);
>
> set_page_private(page, (unsigned long)spool);
> @@ -2241,6 +2261,10 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
> out_uncharge_cgroup:
> hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg,
> false);
> +out_uncharge_cgroup_reservation:
> + if (map_chg || avoid_reserve || !vma_resv_map(vma))
> + hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h),
> + h_cg, true);
> out_subpool_put:
> if (map_chg || avoid_reserve)
> hugepage_subpool_put_pages(spool, 1);
> --
> 2.24.1.735.g03f4e72817-goog
>