Message-Id: <20221118222002.82588-4-sidhartha.kumar@oracle.com>
Date: Fri, 18 Nov 2022 14:19:55 -0800
From: Sidhartha Kumar <sidhartha.kumar@...cle.com>
To: linux-kernel@...r.kernel.org, linux-mm@...ck.org
Cc: akpm@...ux-foundation.org, songmuchun@...edance.com,
mike.kravetz@...cle.com, willy@...radead.org,
almasrymina@...gle.com, linmiaohe@...wei.com, hughd@...gle.com,
Sidhartha Kumar <sidhartha.kumar@...cle.com>
Subject: [PATCH mm-unstable v4 03/10] mm/hugetlb: convert dissolve_free_huge_page() to folios

Removes a compound_head() call by using a folio rather than a head page.

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@...cle.com>
---
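Illustrative note (editorial, not part of the patch): this follows the common
folio conversion pattern, where page_folio() resolves the head page once up
front and the folio_* accessors then operate on it directly, instead of each
Page* helper repeating the compound_head() lookup internally. A minimal sketch
of the before/after shape, using hypothetical helper names:

/* Before: each helper may resolve the head page from a tail page. */
static bool page_is_free_hugetlb(struct page *page)
{
	return PageHuge(page) && !page_count(page);
}

/* After: one page_folio() lookup, then folio accessors throughout. */
static bool folio_is_free_hugetlb(struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio_test_hugetlb(folio) && !folio_ref_count(folio);
}
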
mm/hugetlb.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f6f791675c38..d0acabbf3025 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2128,21 +2128,21 @@ static struct page *remove_pool_huge_page(struct hstate *h,
 int dissolve_free_huge_page(struct page *page)
 {
 	int rc = -EBUSY;
+	struct folio *folio = page_folio(page);
 
 retry:
 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
-	if (!PageHuge(page))
+	if (!folio_test_hugetlb(folio))
 		return 0;
 
 	spin_lock_irq(&hugetlb_lock);
-	if (!PageHuge(page)) {
+	if (!folio_test_hugetlb(folio)) {
 		rc = 0;
 		goto out;
 	}
 
-	if (!page_count(page)) {
-		struct page *head = compound_head(page);
-		struct hstate *h = page_hstate(head);
+	if (!folio_ref_count(folio)) {
+		struct hstate *h = folio_hstate(folio);
 		if (!available_huge_pages(h))
 			goto out;
 
@@ -2150,7 +2150,7 @@ int dissolve_free_huge_page(struct page *page)
 		 * We should make sure that the page is already on the free list
 		 * when it is dissolved.
 		 */
-		if (unlikely(!HPageFreed(head))) {
+		if (unlikely(!folio_test_hugetlb_freed(folio))) {
 			spin_unlock_irq(&hugetlb_lock);
 			cond_resched();
 
@@ -2165,7 +2165,7 @@ int dissolve_free_huge_page(struct page *page)
 			goto retry;
 		}
 
-		remove_hugetlb_page(h, head, false);
+		remove_hugetlb_page(h, &folio->page, false);
 		h->max_huge_pages--;
 		spin_unlock_irq(&hugetlb_lock);
 
@@ -2177,12 +2177,12 @@ int dissolve_free_huge_page(struct page *page)
 		 * Attempt to allocate vmemmmap here so that we can take
 		 * appropriate action on failure.
 		 */
-		rc = hugetlb_vmemmap_restore(h, head);
+		rc = hugetlb_vmemmap_restore(h, &folio->page);
 		if (!rc) {
-			update_and_free_page(h, head, false);
+			update_and_free_page(h, &folio->page, false);
 		} else {
 			spin_lock_irq(&hugetlb_lock);
-			add_hugetlb_page(h, head, false);
+			add_hugetlb_page(h, &folio->page, false);
 			h->max_huge_pages++;
 			spin_unlock_irq(&hugetlb_lock);
 		}
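
A design note on the &folio->page arguments (editorial, not from the patch):
remove_hugetlb_page(), hugetlb_vmemmap_restore(), update_and_free_page() and
add_hugetlb_page() still take a struct page at this point in the series, so
each call site converts back. Because a struct folio is laid over its head
page, &folio->page is that head page with no further lookup, roughly:

/* Hypothetical helper that has not been converted to folios yet. */
static void legacy_page_helper(struct hstate *h, struct page *head);

static void folio_caller(struct hstate *h, struct folio *folio)
{
	/* &folio->page is the head page itself; no compound_head() needed. */
	legacy_page_helper(h, &folio->page);
}
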
--
2.38.1