Message-ID: <20240411164756.261178-1-sidhartha.kumar@oracle.com>
Date: Thu, 11 Apr 2024 09:47:56 -0700
From: Sidhartha Kumar <sidhartha.kumar@...cle.com>
To: linux-kernel@...r.kernel.org, linux-mm@...ck.org
Cc: akpm@...ux-foundation.org, willy@...radead.org, linmiaohe@...wei.com,
jane.chu@...cle.com, muchun.song@...ux.dev, nao.horiguchi@...il.com,
Sidhartha Kumar <sidhartha.kumar@...cle.com>
Subject: [PATCH] mm/hugetlb: convert dissolve_free_huge_pages() to folios
Allows us to rename dissolve_free_huge_page() to
dissolve_free_hugetlb_folio(). Convert the caller in
dissolve_free_huge_pages() to pass in a folio directly (via pfn_folio()),
and use page_folio() to convert the caller in mm/memory-failure.c.
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@...cle.com>
---
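For reference, this is the caller-side pattern the conversion moves to; an
illustrative sketch rather than part of the patch below (pfn_folio(pfn) is
page_folio(pfn_to_page(pfn))):

	/* before: callers looked up a page and passed it in */
	struct page *page = pfn_to_page(pfn);
	rc = dissolve_free_huge_page(page);

	/* after: callers pass a folio, via pfn_folio() or page_folio() */
	struct folio *folio = pfn_folio(pfn);
	rc = dissolve_free_hugetlb_folio(folio);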
 include/linux/hugetlb.h |  4 ++--
 mm/hugetlb.c            | 15 +++++++--------
 mm/memory-failure.c     |  4 ++--
 3 files changed, 11 insertions(+), 12 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3f3e628802792..f4191b10345d6 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -861,7 +861,7 @@ static inline int hstate_index(struct hstate *h)
 	return h - hstates;
 }
 
-extern int dissolve_free_huge_page(struct page *page);
+extern int dissolve_free_hugetlb_folio(struct folio *folio);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				unsigned long end_pfn);
@@ -1148,7 +1148,7 @@ static inline int hstate_index(struct hstate *h)
 	return 0;
 }
 
-static inline int dissolve_free_huge_page(struct page *page)
+static inline int dissolve_free_hugetlb_folio(struct folio *folio)
 {
 	return 0;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 454900c84b303..617f8bec6eb42 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2377,8 +2377,8 @@ static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
 }
 
 /*
- * Dissolve a given free hugepage into free buddy pages. This function does
- * nothing for in-use hugepages and non-hugepages.
+ * Dissolve a given free hugetlb folio into free buddy pages. This function
+ * does nothing for in-use hugepages and non-hugepages.
  * This function returns values like below:
 *
 * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
@@ -2390,10 +2390,9 @@ static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
 * 0: successfully dissolved free hugepages or the page is not a
 * hugepage (considered as already dissolved)
 */
-int dissolve_free_huge_page(struct page *page)
+int dissolve_free_hugetlb_folio(struct folio *folio)
 {
 	int rc = -EBUSY;
-	struct folio *folio = page_folio(page);
 
 retry:
 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
@@ -2470,13 +2469,13 @@ int dissolve_free_huge_page(struct page *page)
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
- * Also note that if dissolve_free_huge_page() returns with an error, all
+ * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long pfn;
-	struct page *page;
+	struct folio *folio;
 	int rc = 0;
 	unsigned int order;
 	struct hstate *h;
@@ -2489,8 +2488,8 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 		order = min(order, huge_page_order(h));
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
-		page = pfn_to_page(pfn);
-		rc = dissolve_free_huge_page(page);
+		folio = pfn_folio(pfn);
+		rc = dissolve_free_hugetlb_folio(folio);
 		if (rc)
 			break;
 	}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 88359a185c5f9..5a6062b61c44d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -155,11 +155,11 @@ static int __page_handle_poison(struct page *page)
 
 	/*
 	 * zone_pcp_disable() can't be used here. It will hold pcp_batch_high_lock and
-	 * dissolve_free_huge_page() might hold cpu_hotplug_lock via static_key_slow_dec()
+	 * dissolve_free_hugetlb_folio() might hold cpu_hotplug_lock via static_key_slow_dec()
 	 * when hugetlb vmemmap optimization is enabled. This will break current lock
 	 * dependency chain and leads to deadlock.
 	 */
-	ret = dissolve_free_huge_page(page);
+	ret = dissolve_free_hugetlb_folio(page_folio(page));
 	if (!ret) {
 		drain_all_pages(page_zone(page));
 		ret = take_page_off_buddy(page);
--
2.44.0