Message-ID: <20240122194200.381241-8-david@redhat.com>
Date: Mon, 22 Jan 2024 20:41:56 +0100
From: David Hildenbrand <david@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org,
David Hildenbrand <david@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Matthew Wilcox <willy@...radead.org>,
Ryan Roberts <ryan.roberts@....com>,
Russell King <linux@...linux.org.uk>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Dinh Nguyen <dinguyen@...nel.org>,
Michael Ellerman <mpe@...erman.id.au>,
Nicholas Piggin <npiggin@...il.com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
"Aneesh Kumar K.V" <aneesh.kumar@...nel.org>,
"Naveen N. Rao" <naveen.n.rao@...ux.ibm.com>,
Paul Walmsley <paul.walmsley@...ive.com>,
Palmer Dabbelt <palmer@...belt.com>,
Albert Ou <aou@...s.berkeley.edu>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Gerald Schaefer <gerald.schaefer@...ux.ibm.com>,
Heiko Carstens <hca@...ux.ibm.com>,
Vasily Gorbik <gor@...ux.ibm.com>,
Christian Borntraeger <borntraeger@...ux.ibm.com>,
Sven Schnelle <svens@...ux.ibm.com>,
"David S. Miller" <davem@...emloft.net>,
linux-arm-kernel@...ts.infradead.org,
linuxppc-dev@...ts.ozlabs.org,
linux-riscv@...ts.infradead.org,
linux-s390@...r.kernel.org,
sparclinux@...r.kernel.org
Subject: [PATCH v1 07/11] mm/memory: factor out copying the actual PTE in copy_present_pte()
Let's factor out copying the actual PTE in copy_present_pte() into a new helper, to prepare for further changes.
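As a rough illustration (a sketch condensed from the diff below, not the literal code), both the "no normal page" case and the mapped-page case in copy_present_pte() now funnel into the new helper:

	/* Sketch only -- see the diff below for the authoritative change. */
	page = vm_normal_page(src_vma, addr, pte);
	if (unlikely(!page))
		goto copy_pte;
	/* ... anon/file refcount and rss accounting as before ... */
copy_pte:
	__copy_present_pte(dst_vma, src_vma, dst_pte, src_pte, pte, addr);
	return 0;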
Signed-off-by: David Hildenbrand <david@...hat.com>
---
mm/memory.c | 60 ++++++++++++++++++++++++++++-------------------------
1 file changed, 32 insertions(+), 28 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 7e1f4849463aa..2aa2051ee51d3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -930,6 +930,29 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
return 0;
}
+static inline void __copy_present_pte(struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
+ pte_t pte, unsigned long addr)
+{
+ struct mm_struct *src_mm = src_vma->vm_mm;
+
+ /* If it's a COW mapping, write protect it in both processes. */
+ if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
+ ptep_set_wrprotect(src_mm, addr, src_pte);
+ pte = pte_wrprotect(pte);
+ }
+
+ /* If it's a shared mapping, mark it clean in the child. */
+ if (src_vma->vm_flags & VM_SHARED)
+ pte = pte_mkclean(pte);
+ pte = pte_mkold(pte);
+
+ if (!userfaultfd_wp(dst_vma))
+ pte = pte_clear_uffd_wp(pte);
+
+ set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
+}
+
/*
* Copy one pte. Returns 0 if succeeded, or -EAGAIN if one preallocated page
* is required to copy this pte.
@@ -939,16 +962,16 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
struct folio **prealloc)
{
- struct mm_struct *src_mm = src_vma->vm_mm;
- unsigned long vm_flags = src_vma->vm_flags;
pte_t pte = ptep_get(src_pte);
struct page *page;
struct folio *folio;
page = vm_normal_page(src_vma, addr, pte);
- if (page)
- folio = page_folio(page);
- if (page && folio_test_anon(folio)) {
+ if (unlikely(!page))
+ goto copy_pte;
+
+ folio = page_folio(page);
+ if (folio_test_anon(folio)) {
/*
* If this page may have been pinned by the parent process,
* copy the page immediately for the child so that we'll always
@@ -963,34 +986,15 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
addr, rss, prealloc, page);
}
rss[MM_ANONPAGES]++;
- } else if (page) {
+ VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
+ } else {
folio_get(folio);
folio_dup_file_rmap_pte(folio, page);
rss[mm_counter_file(page)]++;
}
- /*
- * If it's a COW mapping, write protect it both
- * in the parent and the child
- */
- if (is_cow_mapping(vm_flags) && pte_write(pte)) {
- ptep_set_wrprotect(src_mm, addr, src_pte);
- pte = pte_wrprotect(pte);
- }
- VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page));
-
- /*
- * If it's a shared mapping, mark it clean in
- * the child
- */
- if (vm_flags & VM_SHARED)
- pte = pte_mkclean(pte);
- pte = pte_mkold(pte);
-
- if (!userfaultfd_wp(dst_vma))
- pte = pte_clear_uffd_wp(pte);
-
- set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
+copy_pte:
+ __copy_present_pte(dst_vma, src_vma, dst_pte, src_pte, pte, addr);
return 0;
}
--
2.43.0