Message-ID: <20240221234732.187629-5-vishal.moola@gmail.com>
Date: Wed, 21 Feb 2024 15:47:31 -0800
From: "Vishal Moola (Oracle)" <vishal.moola@...il.com>
To: linux-mm@...ck.org
Cc: linux-kernel@...r.kernel.org,
akpm@...ux-foundation.org,
muchun.song@...ux.dev,
willy@...radead.org,
"Vishal Moola (Oracle)" <vishal.moola@...il.com>
Subject: [PATCH v2 4/5] hugetlb: Use vmf_anon_prepare() instead of anon_vma_prepare()

hugetlb_no_page() and hugetlb_wp() call anon_vma_prepare(). In
preparation for hugetlb to safely handle faults under the VMA lock,
use vmf_anon_prepare() here instead.

Additionally, passing hugetlb_wp() the vm_fault struct from
hugetlb_fault() works toward cleaning up the hugetlb code and function
stack.
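
For context, vmf_anon_prepare() (presumably made callable from hugetlb
earlier in this series) folds the OOM handling into its return value and
also covers the VMA-lock case: it returns 0 if vma->anon_vma is already
set up, drops the VMA read lock and returns VM_FAULT_RETRY when the fault
is being handled under the VMA lock (preparing the anon_vma needs the
mmap lock), and returns VM_FAULT_OOM if __anon_vma_prepare() fails. A
rough sketch, paraphrased from mm/memory.c rather than part of this diff:

	vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;

		if (likely(vma->anon_vma))
			return 0;
		if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
			/* anon_vma allocation needs the mmap lock */
			vma_end_read(vma);
			return VM_FAULT_RETRY;
		}
		if (__anon_vma_prepare(vma))
			return VM_FAULT_OOM;
		return 0;
	}

This is why the callers below can simply propagate ret instead of
hard-coding VM_FAULT_OOM.
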
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@...il.com>
---
 mm/hugetlb.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 70c5870e859e..ae8c8b3da981 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5826,7 +5826,8 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
*/
static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, unsigned int flags,
- struct folio *pagecache_folio, spinlock_t *ptl)
+ struct folio *pagecache_folio, spinlock_t *ptl,
+ struct vm_fault *vmf)
{
const bool unshare = flags & FAULT_FLAG_UNSHARE;
pte_t pte = huge_ptep_get(ptep);
@@ -5960,10 +5961,9 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
* When the original hugepage is shared one, it does not have
* anon_vma prepared.
*/
- if (unlikely(anon_vma_prepare(vma))) {
- ret = VM_FAULT_OOM;
+ ret = vmf_anon_prepare(vmf);
+ if (unlikely(ret))
goto out_release_all;
- }
if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
ret = VM_FAULT_HWPOISON_LARGE;
@@ -6203,10 +6203,10 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
new_pagecache_folio = true;
} else {
folio_lock(folio);
- if (unlikely(anon_vma_prepare(vma))) {
- ret = VM_FAULT_OOM;
+
+ ret = vmf_anon_prepare(vmf);
+ if (unlikely(ret))
goto backout_unlocked;
- }
anon_rmap = 1;
}
} else {
@@ -6273,7 +6273,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
hugetlb_count_add(pages_per_huge_page(h), mm);
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
- ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl);
+ ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl, vmf);
}
spin_unlock(ptl);
@@ -6496,7 +6496,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
if (!huge_pte_write(entry)) {
ret = hugetlb_wp(mm, vma, address, ptep, flags,
- pagecache_folio, ptl);
+ pagecache_folio, ptl, &vmf);
goto out_put_page;
} else if (likely(flags & FAULT_FLAG_WRITE)) {
entry = huge_pte_mkdirty(entry);
--
2.43.0