Message-Id: <20240503005023.174597-7-21cnbao@gmail.com>
Date: Fri, 3 May 2024 12:50:23 +1200
From: Barry Song <21cnbao@...il.com>
To: akpm@...ux-foundation.org,
linux-mm@...ck.org
Cc: baolin.wang@...ux.alibaba.com,
chrisl@...nel.org,
david@...hat.com,
hanchuanhua@...o.com,
hannes@...xchg.org,
hughd@...gle.com,
kasong@...cent.com,
linux-kernel@...r.kernel.org,
ryan.roberts@....com,
surenb@...gle.com,
v-songbaohua@...o.com,
willy@...radead.org,
xiang@...nel.org,
ying.huang@...el.com,
yosryahmed@...gle.com,
yuzhao@...gle.com,
ziy@...dia.com
Subject: [PATCH v3 6/6] mm: swap: entirely map large folios found in swapcache

From: Chuanhua Han <hanchuanhua@...o.com>

When a large folio is found in the swapcache, the current implementation
requires calling do_swap_page() nr_pages times, resulting in nr_pages
page faults. This patch opts to map the entire large folio at once to
minimize page faults. Additionally, redundant checks and early exits
for ARM64 MTE restoring are removed.
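
The batched path is only taken when the whole folio sits inside the
current VMA and PMD table and its swap PTEs still form one consistent
batch; roughly (a simplified sketch of the logic added below, not the
literal code, with the pte_same() and exclusivity handling omitted):

	nr    = folio_nr_pages(folio);
	idx   = folio_page_idx(folio, page);
	start = vmf->address - idx * PAGE_SIZE;

	if (start >= max(vmf->address & PMD_MASK, vma->vm_start) &&
	    start + nr * PAGE_SIZE <= pmd_addr_end(vmf->address, vma->vm_end) &&
	    swap_pte_batch(vmf->pte - idx, nr, ptep_get(vmf->pte - idx)) == nr) {
		/* one fault maps the whole folio instead of nr separate faults */
		folio_ref_add(folio, nr - 1);
		set_ptes(vma->vm_mm, start, vmf->pte - idx, pte, nr);
	}
	/* otherwise nr stays 1 and only the faulting page is mapped */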

Signed-off-by: Chuanhua Han <hanchuanhua@...o.com>
Co-developed-by: Barry Song <v-songbaohua@...o.com>
Signed-off-by: Barry Song <v-songbaohua@...o.com>
---
mm/memory.c | 60 ++++++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 48 insertions(+), 12 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 22e7c33cc747..940fdbe69fa1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
pte_t pte;
vm_fault_t ret = 0;
void *shadow = NULL;
+ int nr_pages = 1;
+ unsigned long page_idx = 0;
+ unsigned long address = vmf->address;
+ pte_t *ptep;
if (!pte_unmap_same(vmf))
goto out;
@@ -4166,6 +4170,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
goto out_nomap;
}
+ ptep = vmf->pte;
+ if (folio_test_large(folio) && folio_test_swapcache(folio)) {
+ int nr = folio_nr_pages(folio);
+ unsigned long idx = folio_page_idx(folio, page);
+ unsigned long folio_start = vmf->address - idx * PAGE_SIZE;
+ unsigned long folio_end = folio_start + nr * PAGE_SIZE;
+ pte_t *folio_ptep;
+ pte_t folio_pte;
+
+ if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
+ goto check_folio;
+ if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
+ goto check_folio;
+
+ folio_ptep = vmf->pte - idx;
+ folio_pte = ptep_get(folio_ptep);
+ if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
+ swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
+ goto check_folio;
+
+ page_idx = idx;
+ address = folio_start;
+ ptep = folio_ptep;
+ nr_pages = nr;
+ entry = folio->swap;
+ page = &folio->page;
+ }
+
+check_folio:
+
/*
* PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
* must never point at an anonymous page in the swapcache that is
@@ -4225,12 +4259,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* We're already holding a reference on the page but haven't mapped it
* yet.
*/
- swap_free_nr(entry, 1);
+ swap_free_nr(entry, nr_pages);
if (should_try_to_free_swap(folio, vma, vmf->flags))
folio_free_swap(folio);
- inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
- dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
+ folio_ref_add(folio, nr_pages - 1);
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
+ add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
pte = mk_pte(page, vma->vm_page_prot);
/*
@@ -4240,34 +4275,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* exclusivity.
*/
if (!folio_test_ksm(folio) &&
- (exclusive || folio_ref_count(folio) == 1)) {
+ (exclusive || (folio_ref_count(folio) == nr_pages &&
+ folio_nr_pages(folio) == nr_pages))) {
if (vmf->flags & FAULT_FLAG_WRITE) {
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
vmf->flags &= ~FAULT_FLAG_WRITE;
}
rmap_flags |= RMAP_EXCLUSIVE;
}
- flush_icache_page(vma, page);
+ flush_icache_pages(vma, page, nr_pages);
if (pte_swp_soft_dirty(vmf->orig_pte))
pte = pte_mksoft_dirty(pte);
if (pte_swp_uffd_wp(vmf->orig_pte))
pte = pte_mkuffd_wp(pte);
- vmf->orig_pte = pte;
+ vmf->orig_pte = pte_advance_pfn(pte, page_idx);
/* ksm created a completely new copy */
if (unlikely(folio != swapcache && swapcache)) {
- folio_add_new_anon_rmap(folio, vma, vmf->address);
+ folio_add_new_anon_rmap(folio, vma, address);
folio_add_lru_vma(folio, vma);
} else {
- folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
+ folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
rmap_flags);
}
VM_BUG_ON(!folio_test_anon(folio) ||
(pte_write(pte) && !PageAnonExclusive(page)));
- set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
- arch_do_swap_page_nr(vma->vm_mm, vma, vmf->address,
- pte, vmf->orig_pte, 1);
+ set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
+ arch_do_swap_page_nr(vma->vm_mm, vma, address,
+ pte, pte, nr_pages);
folio_unlock(folio);
if (folio != swapcache && swapcache) {
@@ -4291,7 +4327,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
}
/* No need to invalidate - it was non-present before */
- update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+ update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
--
2.34.1