Message-Id: <20220204195852.1751729-54-willy@infradead.org>
Date: Fri, 4 Feb 2022 19:58:30 +0000
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: linux-mm@...ck.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH 53/75] mm/rmap: Convert make_device_exclusive_range() to use folios

Move the PageTail check earlier so we can avoid even taking the page
lock on tail pages. Otherwise, this is a straightforward use of
folios throughout.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/rmap.c | 39 +++++++++++++++++++++------------------
1 file changed, 21 insertions(+), 18 deletions(-)
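
Notes:

Why checking PageTail first helps: trylock_page() resolves a tail
page to its compound head, so the old loop took (and then had to
drop) the head's page lock only for the PageTail test inside
page_make_device_exclusive() to fail.  Testing PageTail before
folio_trylock() skips the lock round trip entirely.

For context, a minimal sketch of the caller side of this interface.
The function name, the fixed-size pages[] array and the error
handling are illustrative assumptions, not taken from this series;
only make_device_exclusive_range() and its calling convention are
real:

	/*
	 * Hypothetical driver helper: make a range of user memory
	 * device-exclusive and program a device mapping for each page
	 * that succeeded.  @owner is the cookie the driver's MMU
	 * notifier matches on so it can ignore the invalidations that
	 * this call itself generates.
	 */
	static int driver_make_range_exclusive(struct mm_struct *mm,
			unsigned long start, unsigned long end, void *owner)
	{
		struct page *pages[16];	/* illustrative fixed bound */
		int i, npages;

		if ((end - start) >> PAGE_SHIFT > ARRAY_SIZE(pages))
			return -EINVAL;

		/* GUP inside make_device_exclusive_range() needs the mmap lock */
		mmap_read_lock(mm);
		npages = make_device_exclusive_range(mm, start, end, pages,
						     owner);
		mmap_read_unlock(mm);
		if (npages < 0)
			return npages;

		for (i = 0; i < npages; i++) {
			/* NULL: this page could not be made exclusive */
			if (!pages[i])
				continue;

			/* ... program the device mapping for pages[i] ... */

			/* successful entries come back locked + referenced */
			unlock_page(pages[i]);
			put_page(pages[i]);
		}
		return 0;
	}
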
diff --git a/mm/rmap.c b/mm/rmap.c
index 4cfac67e328c..ffc1b2f0cf24 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2088,6 +2088,7 @@ struct make_exclusive_args {
static bool page_make_device_exclusive_one(struct page *page,
struct vm_area_struct *vma, unsigned long address, void *priv)
{
+ struct folio *folio = page_folio(page);
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
.vma = vma,
@@ -2104,12 +2105,13 @@ static bool page_make_device_exclusive_one(struct page *page,
pvmw_set_page(&pvmw, page);
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
vma->vm_mm, address, min(vma->vm_end,
- address + page_size(page)), args->owner);
+ address + folio_size(folio)),
+ args->owner);
mmu_notifier_invalidate_range_start(&range);

while (page_vma_mapped_walk(&pvmw)) {
/* Unexpected PMD-mapped THP? */
- VM_BUG_ON_PAGE(!pvmw.pte, page);
+ VM_BUG_ON_FOLIO(!pvmw.pte, folio);

if (!pte_present(*pvmw.pte)) {
ret = false;
@@ -2117,16 +2119,17 @@ static bool page_make_device_exclusive_one(struct page *page,
break;
}

- subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+ subpage = folio_page(folio,
+ pte_pfn(*pvmw.pte) - folio_pfn(folio));
address = pvmw.address;

/* Nuke the page table entry. */
flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
pteval = ptep_clear_flush(vma, address, pvmw.pte);

- /* Move the dirty bit to the page. Now the pte is gone. */
+ /* Set the dirty flag on the folio now the pte is gone. */
if (pte_dirty(pteval))
- set_page_dirty(page);
+ folio_mark_dirty(folio);

/*
* Check that our target page is still mapped at the expected
@@ -2181,8 +2184,8 @@ static bool page_make_device_exclusive_one(struct page *page,
* Returns false if the page is still mapped, or if it could not be unmapped
* from the expected address. Otherwise returns true (success).
*/
-static bool page_make_device_exclusive(struct page *page, struct mm_struct *mm,
- unsigned long address, void *owner)
+static bool folio_make_device_exclusive(struct folio *folio,
+ struct mm_struct *mm, unsigned long address, void *owner)
{
struct make_exclusive_args args = {
.mm = mm,
@@ -2198,16 +2201,15 @@ static bool page_make_device_exclusive(struct page *page, struct mm_struct *mm,
};

/*
- * Restrict to anonymous pages for now to avoid potential writeback
- * issues. Also tail pages shouldn't be passed to rmap_walk so skip
- * those.
+ * Restrict to anonymous folios for now to avoid potential writeback
+ * issues.
*/
- if (!PageAnon(page) || PageTail(page))
+ if (!folio_test_anon(folio))
return false;

- rmap_walk(page, &rwc);
+ rmap_walk(&folio->page, &rwc);

- return args.valid && !page_mapcount(page);
+ return args.valid && !folio_mapcount(folio);
}

/**
@@ -2245,15 +2247,16 @@ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
return npages;

for (i = 0; i < npages; i++, start += PAGE_SIZE) {
- if (!trylock_page(pages[i])) {
- put_page(pages[i]);
+ struct folio *folio = page_folio(pages[i]);
+ if (PageTail(pages[i]) || !folio_trylock(folio)) {
+ folio_put(folio);
pages[i] = NULL;
continue;
}

- if (!page_make_device_exclusive(pages[i], mm, start, owner)) {
- unlock_page(pages[i]);
- put_page(pages[i]);
+ if (!folio_make_device_exclusive(folio, mm, start, owner)) {
+ folio_unlock(folio);
+ folio_put(folio);
pages[i] = NULL;
}
}
--
2.34.1