Message-ID: <20230825190436.55045-4-mike.kravetz@oracle.com>
Date: Fri, 25 Aug 2023 12:04:23 -0700
From: Mike Kravetz <mike.kravetz@...cle.com>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc: Muchun Song <songmuchun@...edance.com>,
Joao Martins <joao.m.martins@...cle.com>,
Oscar Salvador <osalvador@...e.de>,
David Hildenbrand <david@...hat.com>,
Miaohe Lin <linmiaohe@...wei.com>,
David Rientjes <rientjes@...gle.com>,
Anshuman Khandual <anshuman.khandual@....com>,
Naoya Horiguchi <naoya.horiguchi@...ux.dev>,
Barry Song <song.bao.hua@...ilicon.com>,
Michal Hocko <mhocko@...e.com>,
Matthew Wilcox <willy@...radead.org>,
Xiongchun Duan <duanxiongchun@...edance.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Mike Kravetz <mike.kravetz@...cle.com>,
Sidhartha Kumar <sidhartha.kumar@...cle.com>
Subject: [PATCH 03/12] hugetlb: Remove a few calls to page_folio()
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Anything found on a linked list threaded through ->lru is guaranteed to
be a folio: the compound_head field of a tail page overlaps the ->lru
member of struct page, so a tail page can never appear on such a list.
We can therefore pull folios directly off these lists, no matter whether
pages or folios were added to them.
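
For readers less familiar with the layout, here is a compile-only sketch
of what that argument relies on. It is deliberately simplified (the real
struct page and struct folio carry many more fields), and the helper
first_folio_on() is made up purely for illustration: a tail page stores
"head pointer | 1" in the bytes that would otherwise hold lru.next, so a
tail page can never sit on an ->lru list, and a list entry can be
converted straight to a folio.

/* Illustrative sketch only -- not the kernel's real definitions. */
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

struct page {
	unsigned long flags;
	union {
		struct list_head lru;		/* head page: list linkage */
		unsigned long compound_head;	/* tail page: head pointer | 1 */
	};
};

struct folio {
	union {
		struct {
			unsigned long flags;
			struct list_head lru;	/* same offset as page->lru */
		};
		struct page page;		/* folio starts with its head page */
	};
};

/* Minimal stand-in for the kernel's list_entry()/container_of(). */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/*
 * Hypothetical helper: a tail page would have bit 0 set where lru.next
 * lives, so it can never be validly linked here; the first entry must
 * be a head page and can be taken as a folio with no page_folio() call.
 */
static inline struct folio *first_folio_on(struct list_head *list)
{
	return list_entry(list->next, struct folio, lru);
}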
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
Reviewed-by: Mike Kravetz <mike.kravetz@...cle.com>
Reviewed-by: Muchun Song <songmuchun@...edance.com>
Cc: Sidhartha Kumar <sidhartha.kumar@...cle.com>
---
mm/hugetlb.c | 26 +++++++++++---------------
1 file changed, 11 insertions(+), 15 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1a48a83846cb..a5348dfada89 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1831,11 +1831,9 @@ static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
 
 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
 {
-	struct page *page, *t_page;
-	struct folio *folio;
+	struct folio *folio, *t_folio;
 
-	list_for_each_entry_safe(page, t_page, list, lru) {
-		folio = page_folio(page);
+	list_for_each_entry_safe(folio, t_folio, list, lru) {
 		update_and_free_hugetlb_folio(h, folio, false);
 		cond_resched();
 	}
@@ -2224,8 +2222,7 @@ static struct page *remove_pool_huge_page(struct hstate *h,
 						 bool acct_surplus)
 {
 	int nr_nodes, node;
-	struct page *page = NULL;
-	struct folio *folio;
+	struct folio *folio = NULL;
 
 	lockdep_assert_held(&hugetlb_lock);
 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
@@ -2235,15 +2232,14 @@ static struct page *remove_pool_huge_page(struct hstate *h,
 		 */
 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
 		    !list_empty(&h->hugepage_freelists[node])) {
-			page = list_entry(h->hugepage_freelists[node].next,
-					  struct page, lru);
-			folio = page_folio(page);
+			folio = list_entry(h->hugepage_freelists[node].next,
+					   struct folio, lru);
 			remove_hugetlb_folio(h, folio, acct_surplus);
 			break;
 		}
 	}
 
-	return page;
+	return &folio->page;
 }
 
 /*
@@ -3359,15 +3355,15 @@ static void try_to_free_low(struct hstate *h, unsigned long count,
 	 * Collect pages to be freed on a list, and free after dropping lock
 	 */
 	for_each_node_mask(i, *nodes_allowed) {
-		struct page *page, *next;
+		struct folio *folio, *next;
 		struct list_head *freel = &h->hugepage_freelists[i];
-		list_for_each_entry_safe(page, next, freel, lru) {
+		list_for_each_entry_safe(folio, next, freel, lru) {
 			if (count >= h->nr_huge_pages)
 				goto out;
-			if (PageHighMem(page))
+			if (folio_test_highmem(folio))
 				continue;
-			remove_hugetlb_folio(h, page_folio(page), false);
-			list_add(&page->lru, &page_list);
+			remove_hugetlb_folio(h, folio, false);
+			list_add(&folio->lru, &page_list);
 		}
 	}
--
2.41.0