Message-ID: <88a341bf-0b6a-454a-aeb1-0699233eb37c@redhat.com>
Date: Tue, 5 Dec 2023 14:17:38 +0100
From: David Hildenbrand <david@...hat.com>
To: Ryan Roberts <ryan.roberts@....com>, linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, Andrew Morton <akpm@...ux-foundation.org>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Hugh Dickins <hughd@...gle.com>,
Yin Fengwei <fengwei.yin@...el.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Muchun Song <muchun.song@...ux.dev>,
Peter Xu <peterx@...hat.com>
Subject: Re: [PATCH RFC 34/39] mm/rmap: introduce
folio_try_dup_anon_rmap_[pte|ptes|pmd]()
On 05.12.23 14:12, Ryan Roberts wrote:
> On 04/12/2023 14:21, David Hildenbrand wrote:
>> The last users of page_needs_cow_for_dma() and __page_dup_rmap() are gone;
>> remove them.
>>
>> Add folio_try_dup_anon_rmap_ptes() right away; we want to perform rmap
>> batching during fork() soon.
>>
>> Signed-off-by: David Hildenbrand <david@...hat.com>
>> ---
>> include/linux/mm.h | 6 --
>> include/linux/rmap.h | 145 +++++++++++++++++++++++++++++--------------
>> 2 files changed, 100 insertions(+), 51 deletions(-)
>>
>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>> index 24c1c7c5a99c0..f7565b35ae931 100644
>> --- a/include/linux/mm.h
>> +++ b/include/linux/mm.h
>> @@ -1964,12 +1964,6 @@ static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
>> return folio_maybe_dma_pinned(folio);
>> }
>>
>> -static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
>> - struct page *page)
>> -{
>> - return folio_needs_cow_for_dma(vma, page_folio(page));
>> -}
>> -
>> /**
>> * is_zero_page - Query if a page is a zero page
>> * @page: The page to query
>> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
>> index 21d72cc602adc..84439f7720c62 100644
>> --- a/include/linux/rmap.h
>> +++ b/include/linux/rmap.h
>> @@ -354,68 +354,123 @@ static inline void folio_dup_file_rmap_pmd(struct folio *folio,
>> #endif
>> }
>>
>> -static inline void __page_dup_rmap(struct page *page, bool compound)
>> +static inline int __folio_try_dup_anon_rmap(struct folio *folio,
>
> __always_inline?
Yes.
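
Presumably something like the following, matching the parameter list in
the hunk below (just a sketch; the exact declaration may differ in the
updated patch):

	static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
			struct page *page, unsigned int nr_pages,
			struct vm_area_struct *src_vma, enum rmap_mode mode)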
>
>> + struct page *page, unsigned int nr_pages,
>> + struct vm_area_struct *src_vma, enum rmap_mode mode)
>> {
>> - VM_WARN_ON(folio_test_hugetlb(page_folio(page)));
>> + int i;
>>
>> - if (compound) {
>> - struct folio *folio = (struct folio *)page;
>> + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
>>
>> - VM_BUG_ON_PAGE(compound && !PageHead(page), page);
>> - atomic_inc(&folio->_entire_mapcount);
>> - } else {
>> - atomic_inc(&page->_mapcount);
>> + /*
>> + * No need to check+clear for already shared PTEs/PMDs of the folio.
>> + * This includes PTE mappings of (order-0) KSM folios.
>> + */
>> + if (likely(mode == RMAP_MODE_PTE)) {
>
> Presumably if __always_inline then the compiler will remove this if/else and just
> keep the part indicated by mode? In which case "likely" is pretty useless? Same
> for all similar sites in the other patches.
Yes, also had this in mind. As long as we use __always_inline it
shouldn't ever matter.
>
>> + for (i = 0; i < nr_pages; i++) {
>> + if (PageAnonExclusive(page + i))
>> + goto clear;
>> + }
>> + } else if (mode == RMAP_MODE_PMD) {
>> + if (PageAnonExclusive(page))
>> + goto clear;
>> }
>> + goto dup;
>> +
>> +clear:
>> + /*
>> + * If this folio may have been pinned by the parent process,
>> + * don't allow duplicating the mappings, but instead require, e.g.,
>> + * copying the subpage immediately for the child, so that we'll always
>> + * guarantee the pinned folio won't be randomly replaced in the
>> + * future on write faults.
>> + */
>> + if (likely(!folio_is_device_private(folio) &&
>> + unlikely(folio_needs_cow_for_dma(src_vma, folio))))
>> + return -EBUSY;
>> +
>> + if (likely(mode == RMAP_MODE_PTE)) {
>> + for (i = 0; i < nr_pages; i++)
>
> Do you really need to reset i=0 here? You have already checked that lower pages
> are shared in the above loop, so can't you just start from the first exclusive
> page here?
It's best to check the updated patch I sent.
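
For the record, the suggestion would amount to something like this
sketch -- resuming at the index left by the first scan instead of
resetting it (assuming, as the truncated hunk above suggests, that the
loop body is a per-page ClearPageAnonExclusive()):

	if (likely(mode == RMAP_MODE_PTE)) {
		/* pages below i are already known to be shared */
		for (; i < nr_pages; i++)
			ClearPageAnonExclusive(page + i);
	} else if (mode == RMAP_MODE_PMD) {
		ClearPageAnonExclusive(page);
	}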
--
Cheers,
David / dhildenb