Message-Id: <20200915125947.26204-13-songmuchun@bytedance.com>
Date: Tue, 15 Sep 2020 20:59:35 +0800
From: Muchun Song <songmuchun@...edance.com>
To: corbet@....net, mike.kravetz@...cle.com, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, x86@...nel.org, hpa@...or.com,
dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
viro@...iv.linux.org.uk, akpm@...ux-foundation.org,
paulmck@...nel.org, mchehab+huawei@...nel.org,
pawan.kumar.gupta@...ux.intel.com, rdunlap@...radead.org,
oneukum@...e.com, anshuman.khandual@....com, jroedel@...e.de,
almasrymina@...gle.com, rientjes@...gle.com
Cc: linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
Muchun Song <songmuchun@...edance.com>
Subject: [RFC PATCH 12/24] mm/hugetlb: Defer freeing of hugetlb pages

In a subsequent patch, we will allocate the vmemmap pages when freeing
huge pages. But update_and_free_page() can be called from a non-task
context (and with hugetlb_lock held), so we defer the actual freeing to
a workqueue to avoid having to allocate the vmemmap pages with
GFP_ATOMIC.

Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
mm/hugetlb.c | 94 +++++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 85 insertions(+), 9 deletions(-)
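
As context for review: the freeing path below combines the kernel's
lockless list (llist) and workqueue APIs. The following is a minimal,
self-contained sketch of the same deferral pattern; every name in it
(deferred_item, defer_free, ...) is illustrative only and is not part
of this patch.

	#include <linux/llist.h>
	#include <linux/sched.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct deferred_item {
		struct llist_node node;
		/* ... payload to be freed ... */
	};

	static LLIST_HEAD(deferred_list);

	static void deferred_free_workfn(struct work_struct *work)
	{
		/* Atomically take the whole list; producers keep adding. */
		struct llist_node *node = llist_del_all(&deferred_list);

		while (node) {
			struct deferred_item *item =
				llist_entry(node, struct deferred_item, node);

			node = node->next;
			kfree(item);	/* actual freeing, in task context */
			cond_resched();
		}
	}
	static DECLARE_WORK(deferred_free_work, deferred_free_workfn);

	/* Safe from atomic context, e.g. with a spinlock held. */
	static void defer_free(struct deferred_item *item)
	{
		/*
		 * llist_add() returns true only if the list was empty,
		 * so the work is scheduled exactly once per batch.
		 */
		if (llist_add(&item->node, &deferred_list))
			schedule_work(&deferred_free_work);
	}
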
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a628588a075a..6b57a1183785 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1292,6 +1292,8 @@ static inline void destroy_compound_gigantic_page(struct page *page,
 						unsigned int order) { }
 #endif
 
+static void __free_hugepage(struct hstate *h, struct page *page);
+
 #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
 #include <linux/bootmem_info.h>
 
@@ -1642,6 +1644,64 @@ static void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 
 	free_vmemmap_page_list(&free_pages);
 }
+
+/*
+ * As update_and_free_page() can be called from a non-task context (and
+ * with hugetlb_lock held), we defer the actual freeing to a workqueue to
+ * avoid having to use GFP_ATOMIC to allocate a lot of vmemmap pages.
+ *
+ * update_hpage_vmemmap_workfn() locklessly retrieves the linked list of
+ * pages to be freed and frees them one by one. Since the page->mapping
+ * pointer is going to be cleared in update_hpage_vmemmap_workfn() anyway,
+ * it is reused as the llist_node of a lockless linked list of huge pages
+ * to be freed.
+ */
+static LLIST_HEAD(hpage_update_freelist);
+
+static void update_hpage_vmemmap_workfn(struct work_struct *work)
+{
+	struct llist_node *node;
+	struct page *page;
+
+	node = llist_del_all(&hpage_update_freelist);
+
+	while (node) {
+		page = container_of((struct address_space **)node,
+				    struct page, mapping);
+		node = node->next;
+		page->mapping = NULL;
+		__free_hugepage(page_hstate(page), page);
+
+		cond_resched();
+	}
+}
+static DECLARE_WORK(hpage_update_work, update_hpage_vmemmap_workfn);
+
+static inline void __update_and_free_page(struct hstate *h, struct page *page)
+{
+	/* No need to allocate vmemmap pages */
+	if (!nr_free_vmemmap(h)) {
+		__free_hugepage(h, page);
+		return;
+	}
+
+	/*
+	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap
+	 * pages.
+	 *
+	 * Only call schedule_work() if hpage_update_freelist was previously
+	 * empty. Otherwise, schedule_work() has already been called but the
+	 * workfn hasn't retrieved the list yet.
+	 */
+	if (llist_add((struct llist_node *)&page->mapping,
+		      &hpage_update_freelist))
+		schedule_work(&hpage_update_work);
+}
+
+static inline void free_gigantic_page_comm(struct hstate *h, struct page *page)
+{
+	free_gigantic_page(page, huge_page_order(h));
+}
 #else
 static inline void hugetlb_vmemmap_init(struct hstate *h)
 {
@@ -1659,17 +1719,39 @@ static inline void vmemmap_pgtable_free(struct hstate *h, struct page *page)
 static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 }
+
+static inline void __update_and_free_page(struct hstate *h, struct page *page)
+{
+	__free_hugepage(h, page);
+}
+
+static inline void free_gigantic_page_comm(struct hstate *h, struct page *page)
+{
+	/*
+	 * Temporarily drop the hugetlb_lock, because
+	 * we might block in free_gigantic_page().
+	 */
+	spin_unlock(&hugetlb_lock);
+	free_gigantic_page(page, huge_page_order(h));
+	spin_lock(&hugetlb_lock);
+}
 #endif
 
 static void update_and_free_page(struct hstate *h, struct page *page)
 {
-	int i;
-
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 		return;
 
 	h->nr_huge_pages--;
 	h->nr_huge_pages_node[page_to_nid(page)]--;
+
+	__update_and_free_page(h, page);
+}
+
+static void __free_hugepage(struct hstate *h, struct page *page)
+{
+	int i;
+
 	for (i = 0; i < pages_per_huge_page(h); i++) {
 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
 				1 << PG_referenced | 1 << PG_dirty |
@@ -1681,14 +1763,8 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
 	set_page_refcounted(page);
 	if (hstate_is_gigantic(h)) {
-		/*
-		 * Temporarily drop the hugetlb_lock, because
-		 * we might block in free_gigantic_page().
-		 */
-		spin_unlock(&hugetlb_lock);
 		destroy_compound_gigantic_page(page, huge_page_order(h));
-		free_gigantic_page(page, huge_page_order(h));
-		spin_lock(&hugetlb_lock);
+		free_gigantic_page_comm(h, page);
 	} else {
 		__free_pages(page, huge_page_order(h));
 	}
--
2.20.1
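
A note on the page->mapping trick in update_hpage_vmemmap_workfn():
struct llist_node is a single pointer, and page->mapping is a
pointer-sized, pointer-aligned field that is about to be cleared anyway,
so it can carry the list linkage while the page waits to be freed. The
helpers below make the round trip explicit; they are an illustrative
sketch only and are not part of the patch.

	#include <linux/build_bug.h>
	#include <linux/kernel.h>	/* container_of() */
	#include <linux/llist.h>
	#include <linux/mm_types.h>

	/* Treat the unused mapping field as the llist linkage. */
	static inline struct llist_node *hpage_freelist_node(struct page *page)
	{
		/* llist_node is one pointer; it must fit in page->mapping. */
		BUILD_BUG_ON(sizeof(struct llist_node) >
			     sizeof(page->mapping));
		return (struct llist_node *)&page->mapping;
	}

	/* Recover the huge page from its queued llist_node. */
	static inline struct page *hpage_from_freelist_node(struct llist_node *node)
	{
		return container_of((struct address_space **)node,
				    struct page, mapping);
	}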