Message-ID: <2ed34573-2f11-485d-9d8d-224e98aa59b2@arm.com>
Date: Tue, 23 Jan 2024 12:35:07 +0000
From: Ryan Roberts <ryan.roberts@....com>
To: David Hildenbrand <david@...hat.com>, linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, Andrew Morton <akpm@...ux-foundation.org>,
Matthew Wilcox <willy@...radead.org>, Russell King <linux@...linux.org.uk>,
Catalin Marinas <catalin.marinas@....com>, Will Deacon <will@...nel.org>,
Dinh Nguyen <dinguyen@...nel.org>, Michael Ellerman <mpe@...erman.id.au>,
Nicholas Piggin <npiggin@...il.com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
"Aneesh Kumar K.V" <aneesh.kumar@...nel.org>,
"Naveen N. Rao" <naveen.n.rao@...ux.ibm.com>,
Paul Walmsley <paul.walmsley@...ive.com>, Palmer Dabbelt
<palmer@...belt.com>, Albert Ou <aou@...s.berkeley.edu>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Gerald Schaefer <gerald.schaefer@...ux.ibm.com>,
Heiko Carstens <hca@...ux.ibm.com>, Vasily Gorbik <gor@...ux.ibm.com>,
Christian Borntraeger <borntraeger@...ux.ibm.com>,
Sven Schnelle <svens@...ux.ibm.com>, "David S. Miller"
<davem@...emloft.net>, linux-arm-kernel@...ts.infradead.org,
linuxppc-dev@...ts.ozlabs.org, linux-riscv@...ts.infradead.org,
linux-s390@...r.kernel.org, sparclinux@...r.kernel.org
Subject: Re: [PATCH v1 11/11] mm/memory: ignore writable bit in
folio_pte_batch()
On 22/01/2024 19:42, David Hildenbrand wrote:
> ... and conditionally return to the caller if any PTE except the first one
> is writable. fork() has to make sure to properly write-protect in case any
> PTE is writable. Other users (e.g., page unmapping) won't care.
>
> Signed-off-by: David Hildenbrand <david@...hat.com>
Reviewed-by: Ryan Roberts <ryan.roberts@....com>
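
For anyone following along, the core trick is easiest to see outside the
kernel. Below is a minimal userspace sketch of the compare-after-masking
idea in this patch: PTEs form a batch once the "ignored" bits (accessed,
dirty, soft-dirty, and now writable) are cleared before comparison, while
the writable bit of every PTE after the first is folded into *any_writable.
All names and bit positions below are made up for illustration; this is
not the kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit layout -- real pgtable bits differ per arch. */
#define PTE_PFN_SHIFT	12
#define PTE_WRITE	(1ull << 1)
#define PTE_ACCESSED	(1ull << 2)
#define PTE_DIRTY	(1ull << 3)
#define PTE_SOFT_DIRTY	(1ull << 4)
#define PTE_IGNORED	(PTE_WRITE | PTE_ACCESSED | PTE_DIRTY | PTE_SOFT_DIRTY)

typedef uint64_t pte_t;

/* Advance the PFN field by one page, like pte_next_pfn(). */
static pte_t pte_next_pfn(pte_t pte)
{
	return pte + (1ull << PTE_PFN_SHIFT);
}

/* Clear the bits a batch is allowed to differ in before comparing. */
static pte_t pte_batch_clear_ignored(pte_t pte)
{
	return pte & ~PTE_IGNORED;
}

/* Count consecutive PTEs mapping consecutive PFNs; report extra writables. */
static int pte_batch(const pte_t *ptep, int max_nr, bool *any_writable)
{
	pte_t expected = pte_batch_clear_ignored(pte_next_pfn(ptep[0]));
	int nr = 1;

	*any_writable = false;
	while (nr < max_nr) {
		pte_t pte = ptep[nr];
		bool writable = pte & PTE_WRITE;

		if (pte_batch_clear_ignored(pte) != expected)
			break;
		/* Only PTEs that made it into the batch contribute. */
		*any_writable |= writable;
		expected = pte_next_pfn(expected);
		nr++;
	}
	return nr;
}

int main(void)
{
	/* Three consecutive PFNs; only the middle PTE is writable/dirty. */
	pte_t ptes[] = {
		100ull << PTE_PFN_SHIFT,
		(101ull << PTE_PFN_SHIFT) | PTE_WRITE | PTE_DIRTY,
		102ull << PTE_PFN_SHIFT,
	};
	bool any_writable;
	int nr = pte_batch(ptes, 3, &any_writable);

	/* Prints: batched 3 PTEs, any_writable=1 */
	printf("batched %d PTEs, any_writable=%d\n", nr, any_writable);
	return 0;
}

Note that, as in the patch, the writable bit is only OR'ed into
*any_writable once the PTE is known to belong to the batch.
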
> ---
> mm/memory.c | 26 +++++++++++++++++++++-----
> 1 file changed, 21 insertions(+), 5 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 341b2be845b6e..a26fd0669016b 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -955,7 +955,7 @@ static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
>
> static inline pte_t __pte_batch_clear_ignored(pte_t pte)
> {
> - return pte_clear_soft_dirty(pte_mkclean(pte_mkold(pte)));
> + return pte_wrprotect(pte_clear_soft_dirty(pte_mkclean(pte_mkold(pte))));
> }
>
> /*
> @@ -963,20 +963,29 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte)
> * pages of the same folio.
> *
> * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
> - * the accessed bit, dirty bit and soft-dirty bit.
> + * the accessed bit, dirty bit, soft-dirty bit and writable bit.
> + * If "any_writable" is set, it will indicate if any other PTE besides the
> + * first (given) PTE is writable.
> */
> static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> - pte_t *start_ptep, pte_t pte, int max_nr)
> + pte_t *start_ptep, pte_t pte, int max_nr, bool *any_writable)
> {
> unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
> const pte_t *end_ptep = start_ptep + max_nr;
> pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte));
> pte_t *ptep = start_ptep + 1;
> + bool writable;
> +
> + if (any_writable)
> + *any_writable = false;
>
> VM_WARN_ON_FOLIO(!pte_present(pte), folio);
>
> while (ptep != end_ptep) {
> - pte = __pte_batch_clear_ignored(ptep_get(ptep));
> + pte = ptep_get(ptep);
> + if (any_writable)
> + writable = !!pte_write(pte);
> + pte = __pte_batch_clear_ignored(pte);
>
> if (!pte_same(pte, expected_pte))
> break;
> @@ -989,6 +998,9 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> if (pte_pfn(pte) == folio_end_pfn)
> break;
>
> + if (any_writable)
> + *any_writable |= writable;
> +
> expected_pte = pte_next_pfn(expected_pte);
> ptep++;
> }
> @@ -1010,6 +1022,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
> {
> struct page *page;
> struct folio *folio;
> + bool any_writable;
> int err, nr;
>
> page = vm_normal_page(src_vma, addr, pte);
> @@ -1024,7 +1037,8 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
> * by keeping the batching logic separate.
> */
> if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
> - nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr);
> + nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr,
> + &any_writable);
> if (folio_test_anon(folio)) {
> folio_ref_add(folio, nr);
> if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
> @@ -1039,6 +1053,8 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
> folio_dup_file_rmap_ptes(folio, page, nr);
> rss[mm_counter_file(page)] += nr;
> }
> + if (any_writable)
> + pte = pte_mkwrite(pte, src_vma);
> __copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
> addr, nr);
> return nr;
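
And a companion sketch (same made-up PTE model as above) of the caller
side: if any source PTE in the batch was writable, the template PTE copied
into the child gets the write bit back before the usual COW
write-protection is applied. This only illustrates the calling convention,
not the real copy_present_ptes():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_WRITE	(1ull << 1)

typedef uint64_t pte_t;

/* Simplified: the kernel's pte_mkwrite() also takes the VMA. */
static pte_t pte_mkwrite(pte_t pte)	{ return pte | PTE_WRITE; }
static pte_t pte_wrprotect(pte_t pte)	{ return pte & ~PTE_WRITE; }

/* Derive the PTE to write into the child from the first PTE of a batch. */
static pte_t child_pte(pte_t first_pte, bool any_writable, bool needs_cow)
{
	pte_t pte = first_pte;

	/* Restore the write bit the batch comparison ignored ... */
	if (any_writable)
		pte = pte_mkwrite(pte);
	/* ... then write-protect if COW semantics demand it. */
	if (needs_cow)
		pte = pte_wrprotect(pte);
	return pte;
}

int main(void)
{
	pte_t first = 100ull << 12;	/* read-only first PTE of a batch */

	/* Shared mapping: batch stays writable if any source PTE was. */
	printf("shared: %#llx\n",
	       (unsigned long long)child_pte(first, true, false));
	/* Anon/COW mapping: everything ends up write-protected. */
	printf("cow:    %#llx\n",
	       (unsigned long long)child_pte(first, true, true));
	return 0;
}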