Message-Id: <20170809204333.27485-1-jglisse@redhat.com>
Date: Wed, 9 Aug 2017 16:43:33 -0400
From: jglisse@...hat.com
To: linux-mm@...ck.org
Cc: linux-kernel@...r.kernel.org,
Jérôme Glisse <jglisse@...hat.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Andrew Morton <akpm@...ux-foundation.org>
Subject: [PATCH] mm/rmap: try_to_unmap_one() do not call mmu_notifier under ptl v2
From: Jérôme Glisse <jglisse@...hat.com>
MMU notifiers can sleep, but in try_to_unmap_one() we call
mmu_notifier_invalidate_page() under the page table lock.
Let's instead call mmu_notifier_invalidate_range() outside the
page_vma_mapped_walk() loop.
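The shape of the fix, as a minimal standalone sketch: a pthread spinlock
stands in for the page table lock and a hypothetical sleepable_invalidate()
stands in for mmu_notifier_invalidate_range(). This is userspace C under
those assumptions, not kernel code; it only shows the possibly-sleeping call
being deferred until the lock is dropped and then issued once for the whole
range (build with cc -pthread; the loop body is elided, only the locking
structure matters).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define PAGE_SIZE 4096UL                /* assumed 4 KiB pages */

static pthread_spinlock_t ptl;          /* stands in for the page table lock */

/* Hypothetical sleepable callback, like mmu_notifier_invalidate_range(). */
static void sleepable_invalidate(unsigned long start, unsigned long end)
{
        usleep(1000);                   /* sleeping is only legal outside the lock */
        printf("invalidate [%#lx, %#lx)\n", start, end);
}

static void unmap_range(unsigned long address, unsigned long nr_pages)
{
        bool invalidation_needed = false;
        unsigned long i;

        pthread_spin_lock(&ptl);
        for (i = 0; i < nr_pages; i++) {
                /* ... nuke the page table entry at address + i * PAGE_SIZE ... */
                invalidation_needed = true;     /* defer: do not sleep under the lock */
        }
        pthread_spin_unlock(&ptl);

        if (invalidation_needed)
                sleepable_invalidate(address, address + nr_pages * PAGE_SIZE);
}

int main(void)
{
        pthread_spin_init(&ptl, PTHREAD_PROCESS_PRIVATE);
        unmap_range(0x100000, 4);
        pthread_spin_destroy(&ptl);
        return 0;
}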
Changed since v1:
- s/end/address + (PAGE_SIZE << compound_order(page)) (see the note below)
- rebase on top of mmotm
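Note on the range end above: for a compound page the invalidated byte length
is PAGE_SIZE shifted by the compound order. A tiny standalone check, with
PAGE_SIZE assumed to be 4 KiB (it is arch-dependent in the kernel):

#include <stdio.h>

#define PAGE_SIZE 4096UL                /* assumed; arch-dependent in the kernel */

/* Byte length spanned by a compound page of the given order. */
static unsigned long compound_len(unsigned int order)
{
        return PAGE_SIZE << order;
}

int main(void)
{
        printf("order 0: %lu\n", compound_len(0));      /* 4096 */
        printf("order 9: %lu\n", compound_len(9));      /* 2097152, i.e. a 2 MiB THP */
        return 0;
}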
Signed-off-by: Jérôme Glisse <jglisse@...hat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Fixes: c7ab0d2fdc84 ("mm: convert try_to_unmap_one() to use page_vma_mapped_walk()")
---
mm/rmap.c | 31 +++++++++++++++++--------------
1 file changed, 17 insertions(+), 14 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 2c55e7b8f8f4..37d18dccf17e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1323,7 +1323,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
};
pte_t pteval;
struct page *subpage;
- bool ret = true;
+ bool ret = true, invalidation_needed = false;
enum ttu_flags flags = (enum ttu_flags)arg;
/* munlock has nothing to gain from examining un-locked vmas */
@@ -1376,11 +1376,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
VM_BUG_ON_PAGE(!pvmw.pte, page);
subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
- address = pvmw.address;
-
if (!(flags & TTU_IGNORE_ACCESS)) {
- if (ptep_clear_flush_young_notify(vma, address,
+ if (ptep_clear_flush_young_notify(vma, pvmw.address,
pvmw.pte)) {
ret = false;
page_vma_mapped_walk_done(&pvmw);
@@ -1389,7 +1387,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
}
/* Nuke the page table entry. */
- flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
+ flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));
if (should_defer_flush(mm, flags)) {
/*
* We clear the PTE but do not flush so potentially
@@ -1399,11 +1397,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* transition on a cached TLB entry is written through
* and traps if the PTE is unmapped.
*/
- pteval = ptep_get_and_clear(mm, address, pvmw.pte);
+ pteval = ptep_get_and_clear(mm, pvmw.address,
+ pvmw.pte);
set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
} else {
- pteval = ptep_clear_flush(vma, address, pvmw.pte);
+ pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
}
/* Move the dirty bit to the page. Now the pte is gone. */
@@ -1418,12 +1417,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
if (PageHuge(page)) {
int nr = 1 << compound_order(page);
hugetlb_count_sub(nr, mm);
- set_huge_swap_pte_at(mm, address,
+ set_huge_swap_pte_at(mm, pvmw.address,
pvmw.pte, pteval,
vma_mmu_pagesize(vma));
} else {
dec_mm_counter(mm, mm_counter(page));
- set_pte_at(mm, address, pvmw.pte, pteval);
+ set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
}
} else if (pte_unused(pteval)) {
@@ -1447,7 +1446,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
swp_pte = swp_entry_to_pte(entry);
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
- set_pte_at(mm, address, pvmw.pte, swp_pte);
+ set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
} else if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(subpage) };
pte_t swp_pte;
@@ -1473,7 +1472,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* If the page was redirtied, it cannot be
* discarded. Remap the page to page table.
*/
- set_pte_at(mm, address, pvmw.pte, pteval);
+ set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
SetPageSwapBacked(page);
ret = false;
page_vma_mapped_walk_done(&pvmw);
@@ -1481,7 +1480,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
}
if (swap_duplicate(entry) < 0) {
- set_pte_at(mm, address, pvmw.pte, pteval);
+ set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
ret = false;
page_vma_mapped_walk_done(&pvmw);
break;
@@ -1497,14 +1496,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
swp_pte = swp_entry_to_pte(entry);
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
- set_pte_at(mm, address, pvmw.pte, swp_pte);
+ set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
} else
dec_mm_counter(mm, mm_counter_file(page));
discard:
page_remove_rmap(subpage, PageHuge(page));
put_page(page);
- mmu_notifier_invalidate_page(mm, address);
+ invalidation_needed = true;
}
+
+ if (invalidation_needed)
+ mmu_notifier_invalidate_range(mm, address,
+ address + (PAGE_SIZE << compound_order(page)));
return ret;
}
--
2.13.4