Message-Id: <20230125170537.96973-7-sidhartha.kumar@oracle.com>
Date: Wed, 25 Jan 2023 09:05:35 -0800
From: Sidhartha Kumar <sidhartha.kumar@...cle.com>
To: linux-kernel@...r.kernel.org, linux-mm@...ck.org
Cc: akpm@...ux-foundation.org, songmuchun@...edance.com,
mike.kravetz@...cle.com, willy@...radead.org, jhubbard@...dia.com,
gerald.schaefer@...ux.ibm.com,
Sidhartha Kumar <sidhartha.kumar@...cle.com>
Subject: [PATCH v2 6/8] mm/hugetlb: convert hugetlb_add_to_page_cache to take in a folio

Every caller of hugetlb_add_to_page_cache() is now passing in
&folio->page, so change the function to take a folio directly and
clean up the call sites.
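
For reference, a minimal before/after sketch of the call-site change
(mirroring the fallocate hunk below; error handling elided):

	/* before: callers had to reach back into the folio for its head page */
	error = hugetlb_add_to_page_cache(&folio->page, mapping, index);

	/* after: the folio is passed directly, so the function no longer
	 * needs to derive it with page_folio() internally
	 */
	error = hugetlb_add_to_page_cache(folio, mapping, index);
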
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@...cle.com>
---
 fs/hugetlbfs/inode.c    | 2 +-
 include/linux/hugetlb.h | 2 +-
 mm/hugetlb.c            | 9 ++++-----
 3 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index e2b8a00696bd..43af1753de5f 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -871,7 +871,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 		}
 		clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
 		__folio_mark_uptodate(folio);
-		error = hugetlb_add_to_page_cache(&folio->page, mapping, index);
+		error = hugetlb_add_to_page_cache(folio, mapping, index);
 		if (unlikely(error)) {
 			restore_reserve_on_error(h, &pseudo_vma, addr, folio);
 			folio_put(folio);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d46f9db09a2c..67cf712c97be 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -722,7 +722,7 @@ struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 				nodemask_t *nmask, gfp_t gfp_mask);
 struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
 				unsigned long address);
-int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
+int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
 			pgoff_t idx);
 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
 				unsigned long address, struct folio *folio);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index aad028706fbc..ab30f8550631 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5662,10 +5662,9 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
 	return present;
 }
 
-int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
+int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
 			pgoff_t idx)
 {
-	struct folio *folio = page_folio(page);
 	struct inode *inode = mapping->host;
 	struct hstate *h = hstate_inode(inode);
 	int err;
@@ -5677,7 +5676,7 @@ int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
 		__folio_clear_locked(folio);
 		return err;
 	}
-	ClearHPageRestoreReserve(page);
+	folio_clear_hugetlb_restore_reserve(folio);
 
 	/*
 	 * mark folio dirty so that it will not be removed from cache/file
@@ -5836,7 +5835,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 		new_folio = true;
 
 		if (vma->vm_flags & VM_MAYSHARE) {
-			int err = hugetlb_add_to_page_cache(&folio->page, mapping, idx);
+			int err = hugetlb_add_to_page_cache(folio, mapping, idx);
 			if (err) {
 				/*
 				 * err can't be -EEXIST which implies someone
@@ -6269,7 +6268,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 		 * hugetlb_fault_mutex_table that here must be hold by
 		 * the caller.
 		 */
-		ret = hugetlb_add_to_page_cache(&folio->page, mapping, idx);
+		ret = hugetlb_add_to_page_cache(folio, mapping, idx);
 		if (ret)
 			goto out_release_nounlock;
 		folio_in_pagecache = true;
--
2.39.1