Message-Id: <20210117151053.24600-9-songmuchun@bytedance.com>
Date: Sun, 17 Jan 2021 23:10:49 +0800
From: Muchun Song <songmuchun@...edance.com>
To: corbet@....net, mike.kravetz@...cle.com, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, x86@...nel.org, hpa@...or.com,
dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
viro@...iv.linux.org.uk, akpm@...ux-foundation.org,
paulmck@...nel.org, mchehab+huawei@...nel.org,
pawan.kumar.gupta@...ux.intel.com, rdunlap@...radead.org,
oneukum@...e.com, anshuman.khandual@....com, jroedel@...e.de,
almasrymina@...gle.com, rientjes@...gle.com, willy@...radead.org,
osalvador@...e.de, mhocko@...e.com, song.bao.hua@...ilicon.com,
david@...hat.com, naoya.horiguchi@....com
Cc: duanxiongchun@...edance.com, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org,
Muchun Song <songmuchun@...edance.com>
Subject: [PATCH v13 08/12] mm: hugetlb: introduce PageHugeInflight
When we free a HugeTLB page whose vmemmap pages can be optimized, it is
released to the buddy allocator through a kworker, and its refcount is
already zero at that point. So if we dissolve the page before the
kworker actually hands it to the buddy allocator, it can be freed a
second time. To avoid this, introduce PageHugeInflight to mark a
HugeTLB page that has already been freed from the hugepage pool but has
not yet been released to the buddy allocator. When we hit an inflight
page, we just need to flush the work.
Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
mm/hugetlb.c | 38 +++++++++++++++++++++++++++++++++++++-
1 file changed, 37 insertions(+), 1 deletion(-)
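
For reviewers, here is a rough sketch of how the pieces below fit
together. It is an illustration only (function bodies are heavily
elided, so it is not meant to compile on its own); the helper names and
their placement are the ones introduced by the hunks that follow:

/*
 * The inflight state lives in page_private() of a tail page of the
 * compound HugeTLB page, so it does not need a new page flag:
 *
 *   SetPageHugeInflight(head)   -> set_page_private(head + 5, -1UL)
 *   ClearPageHugeInflight(head) -> set_page_private(head + 5, 0)
 *   PageHugeInflight(head)      -> page_private(head + 5) == -1UL
 */

/* Freeing path: mark the page before deferring it to the kworker. */
static inline void __update_and_free_page(struct hstate *h, struct page *page)
{
	/* ... */
	SetPageHugeInflight(page);
	/* queue hpage_update_work, which later frees the page to buddy */
}

/* Allocation path: a freshly prepared huge page is never inflight. */
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	free_huge_page_vmemmap(h, page);
	ClearPageHugeInflight(page);
	/* ... */
}

/* Dissolve path: an inflight page is already on its way to buddy. */
int dissolve_free_huge_page(struct page *page)
{
	/* ... */
	rc = 0;
	if (PageHugeInflight(head))
		goto out;	/* the kworker will free it; do not free it again */
	/* ... */
}

In particular, dissolve_free_huge_page() now returns 0 for an inflight
page instead of freeing it a second time.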
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3222bad8b112..14549204ddcb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1343,6 +1343,36 @@ static inline void flush_hpage_update_work(struct hstate *h)
 	flush_work(&hpage_update_work);
 }
 
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+static inline bool PageHugeInflight(struct page *head)
+{
+	return page_private(head + 5) == -1UL;
+}
+
+static inline void SetPageHugeInflight(struct page *head)
+{
+	set_page_private(head + 5, -1UL);
+}
+
+static inline void ClearPageHugeInflight(struct page *head)
+{
+	set_page_private(head + 5, 0);
+}
+#else
+static inline bool PageHugeInflight(struct page *head)
+{
+	return false;
+}
+
+static inline void SetPageHugeInflight(struct page *head)
+{
+}
+
+static inline void ClearPageHugeInflight(struct page *head)
+{
+}
+#endif
+
 static inline void __update_and_free_page(struct hstate *h, struct page *page)
 {
 	/* No need to allocate vmemmap pages */
@@ -1351,6 +1381,8 @@ static inline void __update_and_free_page(struct hstate *h, struct page *page)
 		return;
 	}
 
+	SetPageHugeInflight(page);
+
 	/*
 	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap
 	 * pages.
@@ -1637,6 +1669,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
 	free_huge_page_vmemmap(h, page);
+	ClearPageHugeInflight(page);
 	INIT_LIST_HEAD(&page->lru);
 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
 	set_hugetlb_cgroup(page, NULL);
@@ -1913,13 +1946,16 @@ int dissolve_free_huge_page(struct page *page)
 		if (h->free_huge_pages - h->resv_huge_pages == 0)
 			goto out;
 
+		rc = 0;
 		hwpoison_subpage_set(h, head, page);
+		if (PageHugeInflight(head))
+			goto out;
+
 		list_del(&head->lru);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
 		h->max_huge_pages--;
 		update_and_free_page(h, head);
-		rc = 0;
 	}
 out:
 	spin_unlock(&hugetlb_lock);
--
2.11.0