diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index cb0ba70..65b841d 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -99,7 +99,7 @@ int try_to_unmap(struct page *, enum ttu_flags flags);
  * Called from mm/filemap_xip.c to unmap empty zero page
  */
 pte_t *page_check_address(struct page *, struct mm_struct *,
-				unsigned long, spinlock_t **, int);
+				unsigned long, spinlock_t **, int, int);
 
 /*
  * Used by swapoff to help locate where page is expected in vma.
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 1888b2d..35be29d 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -188,7 +188,7 @@ retry:
 		address = vma->vm_start +
 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-		pte = page_check_address(page, mm, address, &ptl, 1);
+		pte = page_check_address(page, mm, address, &ptl, 1, 0);
 		if (pte) {
 			/* Nuke the page table entry. */
 			flush_cache_page(vma, address, pte_pfn(*pte));
diff --git a/mm/ksm.c b/mm/ksm.c
index 5575f86..8abb14b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -623,7 +623,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 	if (addr == -EFAULT)
 		goto out;
 
-	ptep = page_check_address(page, mm, addr, &ptl, 0);
+	ptep = page_check_address(page, mm, addr, &ptl, 0, 0);
 	if (!ptep)
 		goto out;
 
diff --git a/mm/rmap.c b/mm/rmap.c
index dd43373..4e4eb8e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -270,7 +270,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
  * On success returns with pte mapped and locked.
  */
 pte_t *page_check_address(struct page *page, struct mm_struct *mm,
-			  unsigned long address, spinlock_t **ptlp, int sync)
+			  unsigned long address, spinlock_t **ptlp, int sync, int try)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -298,7 +298,13 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 	}
 
 	ptl = pte_lockptr(mm, pmd);
-	spin_lock(ptl);
+	if (try) {
+		if (!spin_trylock(ptl)) {
+			pte_unmap(pte);
+			return NULL;
+		}
+	} else
+		spin_lock(ptl);
 	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
 		*ptlp = ptl;
 		return pte;
@@ -325,7 +331,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 	address = vma_address(page, vma);
 	if (address == -EFAULT)		/* out of vma range */
 		return 0;
-	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
+	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1, 0);
 	if (!pte)			/* the page is not in this mm */
 		return 0;
 	pte_unmap_unlock(pte, ptl);
@@ -352,7 +358,7 @@ static int page_referenced_one(struct page *page,
 	if (address == -EFAULT)
 		goto out;
 
-	pte = page_check_address(page, mm, address, &ptl, 0);
+	pte = page_check_address(page, mm, address, &ptl, 0, 1);
 	if (!pte)
 		goto out;
 
@@ -547,7 +553,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
 	if (address == -EFAULT)
 		goto out;
 
-	pte = page_check_address(page, mm, address, &ptl, 1);
+	pte = page_check_address(page, mm, address, &ptl, 1, 0);
 	if (!pte)
 		goto out;
 
@@ -774,7 +780,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	if (address == -EFAULT)
 		goto out;
 
-	pte = page_check_address(page, mm, address, &ptl, 0);
+	pte = page_check_address(page, mm, address, &ptl, 0, 0);
 	if (!pte)
 		goto out;
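
In short, the patch gives page_check_address() a new "try" argument: when it is non-zero the PTE lock is taken with spin_trylock(), and a contended lock makes the function unmap the pte and return NULL instead of spinning; page_referenced_one() opts into this, while all other callers pass 0 and keep the old blocking behaviour. Below is a minimal, self-contained userspace sketch of the same try-versus-block pattern using POSIX spinlocks; lookup_locked(), struct item and main() are illustrative stand-ins, not part of the kernel patch.

/*
 * Userspace analogy (not kernel code): take a spinlock either blocking
 * or opportunistically, and report "busy" by returning NULL so the
 * caller can skip the object, much as page_referenced_one() now does.
 * Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct item {
	pthread_spinlock_t lock;
	int value;
};

/* Return the item with its lock held, or NULL in "try" mode if contended. */
static struct item *lookup_locked(struct item *it, int try)
{
	if (try) {
		if (pthread_spin_trylock(&it->lock) != 0)
			return NULL;		/* busy: caller skips this item */
	} else {
		pthread_spin_lock(&it->lock);	/* blocking path, like the try=0 callers */
	}
	return it;
}

int main(void)
{
	struct item it = { .value = 42 };
	struct item *p;

	pthread_spin_init(&it.lock, PTHREAD_PROCESS_PRIVATE);

	p = lookup_locked(&it, 1);		/* opportunistic, like page_referenced_one() */
	if (!p) {
		printf("lock busy, skipping\n");
		return 0;
	}
	printf("value = %d\n", p->value);
	pthread_spin_unlock(&p->lock);
	pthread_spin_destroy(&it.lock);
	return 0;
}

Returning NULL on contention only suits callers that can safely treat a busy lock like an unmapped page; the reference scan can tolerate occasionally missing a page, which is presumably why only page_referenced_one() passes try=1 while page_mkclean_one(), try_to_unmap_one(), KSM's write_protect_page() and the XIP unmap path keep the blocking spin_lock().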