Message-Id: <20210319224209.150047-9-mike.kravetz@oracle.com>
Date:   Fri, 19 Mar 2021 15:42:09 -0700
From:   Mike Kravetz <mike.kravetz@...cle.com>
To:     linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc:     Michal Hocko <mhocko@...e.com>, Shakeel Butt <shakeelb@...gle.com>,
        Oscar Salvador <osalvador@...e.de>,
        David Hildenbrand <david@...hat.com>,
        Muchun Song <songmuchun@...edance.com>,
        David Rientjes <rientjes@...gle.com>,
        Miaohe Lin <linmiaohe@...wei.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Matthew Wilcox <willy@...radead.org>,
        HORIGUCHI NAOYA <naoya.horiguchi@....com>,
        "Aneesh Kumar K . V" <aneesh.kumar@...ux.ibm.com>,
        Waiman Long <longman@...hat.com>, Peter Xu <peterx@...hat.com>,
        Mina Almasry <almasrymina@...gle.com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Mike Kravetz <mike.kravetz@...cle.com>
Subject: [RFC PATCH 8/8] hugetlb: track hugetlb pages allocated via cma_alloc

When a hugetlb page allocated via cma_alloc is freed via cma_release,
the cma_release call may sleep.  For now, only gigantic pages can be
allocated via cma_alloc.  The routine free_huge_page cannot sleep, so
it currently defers freeing of all gigantic pages to a workqueue.
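
For reference, the deferral mechanism already present in mm/hugetlb.c
(free_hpage_workfn and free_hpage_work appear as context in the last
hunk below) looks roughly like this; a condensed sketch, not part of
this patch, with the function body simplified for illustration:

static LLIST_HEAD(hpage_freelist);

/*
 * Runs in process context via the workqueue, so sleeping calls such
 * as cma_release are safe here.
 */
static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node = llist_del_all(&hpage_freelist);

	while (node) {
		struct page *page;

		page = container_of((struct address_space **)node,
				    struct page, mapping);
		node = node->next;
		__free_huge_page(page);		/* may sleep */
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);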

Use a new hugetlb page specific flag, HPageCma, to indicate the page
was allocated via cma_alloc.  With this flag, deferred freeing can be
limited to the gigantic pages that were actually allocated via
cma_alloc.
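
A minimal sketch of how the flag then gates the free path (simplified;
the real free_huge_page must also handle non-task contexts, and
__free_huge_page here stands in for the actual freeing logic):

void free_huge_page(struct page *page)
{
	struct hstate *h = page_hstate(page);

	/*
	 * Only CMA-backed pages need the workqueue detour, because
	 * cma_release is the only call on this path that may sleep.
	 */
	if (free_page_may_sleep(h, page)) {
		if (llist_add((struct llist_node *)&page->mapping,
			      &hpage_freelist))
			schedule_work(&free_hpage_work);
		return;
	}

	__free_huge_page(page);
}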

Signed-off-by: Mike Kravetz <mike.kravetz@...cle.com>
---
 include/linux/hugetlb.h |  7 +++++++
 mm/hugetlb.c            | 18 ++++++++++--------
 2 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index a81ca39c06be..0aba6957a73a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -498,12 +498,18 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  *	modifications require hugetlb_lock.
  * HPG_freed - Set when page is on the free lists.
  *	Synchronization: hugetlb_lock held for examination and modification.
+ * HPG_cma - Set if huge page was directly allocated from CMA area via
+ *	cma_alloc.  Initially set for gigantic page cma allocations, but can
+ *	be set in non-gigantic pages if gigantic pages are demoted.
+ *	Synchronization: Only accessed or modified when there is only one
+ *	reference to the page at allocation or free time.
  */
 enum hugetlb_page_flags {
 	HPG_restore_reserve = 0,
 	HPG_migratable,
 	HPG_temporary,
 	HPG_freed,
+	HPG_cma,
 	__NR_HPAGEFLAGS,
 };
 
@@ -549,6 +555,7 @@ HPAGEFLAG(RestoreReserve, restore_reserve)
 HPAGEFLAG(Migratable, migratable)
 HPAGEFLAG(Temporary, temporary)
 HPAGEFLAG(Freed, freed)
+HPAGEFLAG(Cma, cma)
 
 #ifdef CONFIG_HUGETLB_PAGE
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b8304b290a73..5efff5ce337f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1241,13 +1241,11 @@ static void destroy_compound_gigantic_page(struct page *page,
 
 static void free_gigantic_page(struct page *page, unsigned int order)
 {
-	/*
-	 * If the page isn't allocated using the cma allocator,
-	 * cma_release() returns false.
-	 */
 #ifdef CONFIG_CMA
-	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
+	if (HPageCma(page)) {
+		cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order);
 		return;
+	}
 #endif
 
 	free_contig_range(page_to_pfn(page), 1 << order);
@@ -1269,8 +1267,10 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 		if (hugetlb_cma[nid]) {
 			page = cma_alloc(hugetlb_cma[nid], nr_pages,
 					huge_page_order(h), true);
-			if (page)
+			if (page) {
+				SetHPageCma(page);
 				return page;
+			}
 		}
 
 		if (!(gfp_mask & __GFP_THISNODE)) {
@@ -1280,8 +1280,10 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 
 				page = cma_alloc(hugetlb_cma[node], nr_pages,
 						huge_page_order(h), true);
-				if (page)
+				if (page) {
+					SetHPageCma(page);
 					return page;
+				}
 			}
 		}
 	}
@@ -1397,7 +1399,7 @@ static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
 static bool free_page_may_sleep(struct hstate *h, struct page *page)
 {
 	/* freeing gigantic pages in CMA may sleep */
-	if (hstate_is_gigantic(h))
+	if (HPageCma(page))
 		return true;
 
 	return false;
-- 
2.30.2
