Message-Id: <20230228213738.272178-34-willy@infradead.org>
Date: Tue, 28 Feb 2023 21:37:36 +0000
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: linux-mm@...ck.org, linux-arch@...r.kernel.org
Cc: Yin Fengwei <fengwei.yin@...el.com>, linux-kernel@...r.kernel.org,
Matthew Wilcox <willy@...radead.org>
Subject: [PATCH v3 33/34] mm: Convert do_set_pte() to set_pte_range()
From: Yin Fengwei <fengwei.yin@...el.com>

set_pte_range() allows us to set up page table entries for a specific
range. It takes advantage of batched rmap updates for large folios and
now takes care of calling update_mmu_cache_range(), so callers no
longer need to update the MMU cache themselves.

Signed-off-by: Yin Fengwei <fengwei.yin@...el.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
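[Not part of the patch: a rough, illustrative sketch of where this API
is headed. Once a caller batches across a large folio, a single call
can map every page of it; the in-tree callers converted below still
pass nr == 1. folio_nr_pages() is an existing helper; the surrounding
fragment is hypothetical.]

	/*
	 * Hypothetical caller fragment: map all pages of a large folio
	 * in one call. set_pte_range() sets nr consecutive PTEs,
	 * batches the rmap update, and updates the MMU cache range.
	 */
	struct folio *folio = page_folio(page);
	unsigned int nr = folio_nr_pages(folio);

	set_pte_range(vmf, folio, page, nr, addr);
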
Documentation/filesystems/locking.rst | 2 +-
include/linux/mm.h | 3 ++-
mm/filemap.c | 3 +--
mm/memory.c | 27 +++++++++++++++------------
4 files changed, 19 insertions(+), 16 deletions(-)
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index 7de7a7272a5e..922886fefb7f 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -663,7 +663,7 @@ locked. The VM will unlock the page.
Filesystem should find and map pages associated with offsets from "start_pgoff"
till "end_pgoff". ->map_pages() is called with page table locked and must
not block. If it's not possible to reach a page without blocking,
-filesystem should skip it. Filesystem should use do_set_pte() to setup
+filesystem should skip it. Filesystem should use set_pte_range() to setup
page table entry. Pointer to entry associated with the page is passed in
"pte" field in vm_fault structure. Pointers to entries for other offsets
should be calculated relative to "pte".
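
[Illustration only, not code from this patch: assuming vmf->pte points
at the entry for the faulting address, a ->map_pages() implementation
would find the entry for a nearby address "addr" by pointer
arithmetic, per the "relative to pte" convention above.]

	/* Entry for addr, relative to vmf->pte (assumption: vmf->pte
	 * corresponds to vmf->address). */
	pte_t *ptep = vmf->pte + ((addr - vmf->address) >> PAGE_SHIFT);
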
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1f79667824eb..568ebe7058d4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1168,7 +1168,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
}
vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
+void set_pte_range(struct vm_fault *vmf, struct folio *folio,
+ struct page *page, unsigned int nr, unsigned long addr);
vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
diff --git a/mm/filemap.c b/mm/filemap.c
index db86e459dde6..07ebd90967a3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3507,8 +3507,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
ret = VM_FAULT_NOPAGE;
ref_count++;
- do_set_pte(vmf, page, addr);
- update_mmu_cache(vma, addr, vmf->pte);
+ set_pte_range(vmf, folio, page, 1, addr);
} while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);
/* Restore the vmf->pte */
diff --git a/mm/memory.c b/mm/memory.c
index 69e844d5f75c..efd17ff09315 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4255,7 +4255,8 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
}
#endif
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
+void set_pte_range(struct vm_fault *vmf, struct folio *folio,
+ struct page *page, unsigned int nr, unsigned long addr)
{
struct vm_area_struct *vma = vmf->vma;
bool uffd_wp = pte_marker_uffd_wp(vmf->orig_pte);
@@ -4263,7 +4264,7 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
bool prefault = vmf->address != addr;
pte_t entry;
- flush_icache_page(vma, page);
+ flush_icache_pages(vma, page, nr);
entry = mk_pte(page, vma->vm_page_prot);
if (prefault && arch_wants_old_prefaulted_pte())
@@ -4277,14 +4278,18 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
entry = pte_mkuffd_wp(entry);
/* copy-on-write page */
if (write && !(vma->vm_flags & VM_SHARED)) {
- inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
- page_add_new_anon_rmap(page, vma, addr);
- lru_cache_add_inactive_or_unevictable(page, vma);
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
+ VM_BUG_ON_FOLIO(nr != 1, folio);
+ folio_add_new_anon_rmap(folio, vma, addr);
+ folio_add_lru_vma(folio, vma);
} else {
- inc_mm_counter(vma->vm_mm, mm_counter_file(page));
- page_add_file_rmap(page, vma, false);
+ add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
+ folio_add_file_rmap_range(folio, page, nr, vma, false);
}
- set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
+ set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
+
+ /* no need to invalidate: a not-present page won't be cached */
+ update_mmu_cache_range(vma, addr, vmf->pte, nr);
}
static bool vmf_pte_changed(struct vm_fault *vmf)
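
[For reference: set_ptes(), added earlier in this series, behaves
roughly like the simplified sketch below. pte_next_pfn() is a stand-in
name for the architecture's pfn-advancing helper, and the real
implementation also handles lazy-MMU bookkeeping.]

	/* Write nr consecutive entries, advancing the pfn by one page
	 * each iteration. */
	for (;;) {
		set_pte_at(mm, addr, ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
		pte = pte_next_pfn(pte);
	}
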
@@ -4357,11 +4362,9 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
/* Re-check under ptl */
if (likely(!vmf_pte_changed(vmf))) {
- do_set_pte(vmf, page, vmf->address);
-
- /* no need to invalidate: a not-present page won't be cached */
- update_mmu_cache(vma, vmf->address, vmf->pte);
+ struct folio *folio = page_folio(page);
+ set_pte_range(vmf, folio, page, 1, vmf->address);
ret = 0;
} else {
update_mmu_tlb(vma, vmf->address, vmf->pte);
--
2.39.1