Message-ID: <20240221234732.187629-3-vishal.moola@gmail.com>
Date: Wed, 21 Feb 2024 15:47:29 -0800
From: "Vishal Moola (Oracle)" <vishal.moola@...il.com>
To: linux-mm@...ck.org
Cc: linux-kernel@...r.kernel.org,
akpm@...ux-foundation.org,
muchun.song@...ux.dev,
willy@...radead.org,
"Vishal Moola (Oracle)" <vishal.moola@...il.com>
Subject: [PATCH v2 2/5] hugetlb: Move vm_fault declaration to the top of hugetlb_fault()
hugetlb_fault() currently defines a struct vm_fault to pass to the generic
handle_userfault() function. We can move this definition to the top of
hugetlb_fault() so that it can be used throughout the rest of the
hugetlb fault path.
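
For reference, the struct vm_fault members this patch relies on look
roughly like this (an abbreviated sketch of the definition in
include/linux/mm.h; unrelated members are elided):

	struct vm_fault {
		struct vm_area_struct *vma;	/* Target VMA */
		pgoff_t pgoff;			/* Logical page offset based on vma */
		unsigned long address;		/* Faulting virtual address - masked */
		unsigned long real_address;	/* Faulting virtual address - unmasked */
		enum fault_flag flags;		/* FAULT_FLAG_xxx flags */
		/* ... */
	};
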
This will help clean up a number of excess variables and function
arguments throughout the stack. Also, since vm_fault already has space
to store the page offset, use that instead and get rid of idx.
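
To illustrate what ends up in vmf.pgoff, here is a minimal userspace
sketch (not kernel code) of the arithmetic vma_hugecache_offset()
performs; the values are hypothetical and the local variables stand in
for the kernel's VMA fields and hstate helpers:

	#include <stdio.h>

	int main(void)
	{
		unsigned long vm_start = 0x7f0000000000UL; /* hypothetical VMA start */
		unsigned long vm_pgoff = 512;              /* file offset in 4KiB pages */
		unsigned long address  = 0x7f0000600000UL; /* faulting address */
		unsigned int huge_page_shift = 21;         /* 2MiB huge pages */
		unsigned int huge_page_order = huge_page_shift - 12;

		/*
		 * (address - vm_start) >> huge_page_shift gives the huge page
		 * index within the VMA; adding the VMA's file offset, scaled
		 * to huge page units, gives the index within the mapping.
		 */
		unsigned long pgoff = ((address - vm_start) >> huge_page_shift) +
				      (vm_pgoff >> huge_page_order);

		printf("vmf.pgoff = %lu\n", pgoff); /* prints 4 */
		return 0;
	}
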
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@...il.com>
---
mm/hugetlb.c | 32 +++++++++++++++++++-------------
1 file changed, 19 insertions(+), 13 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ed1581b670d4..d792d60ea16c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6353,13 +6353,25 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
spinlock_t *ptl;
vm_fault_t ret;
u32 hash;
- pgoff_t idx;
struct folio *folio = NULL;
struct folio *pagecache_folio = NULL;
struct hstate *h = hstate_vma(vma);
struct address_space *mapping;
int need_wait_lock = 0;
unsigned long haddr = address & huge_page_mask(h);
+ struct vm_fault vmf = {
+ .vma = vma,
+ .address = haddr,
+ .real_address = address,
+ .flags = flags,
+ .pgoff = vma_hugecache_offset(h, vma, haddr),
+ /* TODO: Track hugetlb faults using vm_fault */
+
+ /*
+ * Some fields may not be initialized, be careful as it may
+ * be hard to debug if called functions make assumptions
+ */
+ };
/* TODO: Handle faults under the VMA lock */
if (flags & FAULT_FLAG_VMA_LOCK) {
@@ -6373,8 +6385,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* the same page in the page cache.
*/
mapping = vma->vm_file->f_mapping;
- idx = vma_hugecache_offset(h, vma, haddr);
- hash = hugetlb_fault_mutex_hash(mapping, idx);
+ hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
/*
@@ -6408,8 +6419,9 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* hugetlb_no_page will drop vma lock and hugetlb fault
* mutex internally, which make us return immediately.
*/
- return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
- entry, flags);
+
+ return hugetlb_no_page(mm, vma, mapping, vmf.pgoff, address,
+ ptep, entry, flags);
}
ret = 0;
@@ -6455,7 +6467,8 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* Just decrements count, does not deallocate */
vma_end_reservation(h, vma, haddr);
- pagecache_folio = filemap_lock_hugetlb_folio(h, mapping, idx);
+ pagecache_folio = filemap_lock_hugetlb_folio(h, mapping,
+ vmf.pgoff);
if (IS_ERR(pagecache_folio))
pagecache_folio = NULL;
}
@@ -6470,13 +6483,6 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
(flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
if (!userfaultfd_wp_async(vma)) {
- struct vm_fault vmf = {
- .vma = vma,
- .address = haddr,
- .real_address = address,
- .flags = flags,
- };
-
spin_unlock(ptl);
if (pagecache_folio) {
folio_unlock(pagecache_folio);
--
2.43.0