[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1446149535-16200-7-git-send-email-ross.zwisler@linux.intel.com>
Date: Thu, 29 Oct 2015 14:12:10 -0600
From: Ross Zwisler <ross.zwisler@...ux.intel.com>
To: linux-kernel@...r.kernel.org
Cc: Ross Zwisler <ross.zwisler@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>,
"J. Bruce Fields" <bfields@...ldses.org>,
"Theodore Ts'o" <tytso@....edu>,
Alexander Viro <viro@...iv.linux.org.uk>,
Andreas Dilger <adilger.kernel@...ger.ca>,
Dan Williams <dan.j.williams@...el.com>,
Dave Chinner <david@...morbit.com>,
Ingo Molnar <mingo@...hat.com>, Jan Kara <jack@...e.com>,
Jeff Layton <jlayton@...chiereds.net>,
Matthew Wilcox <willy@...ux.intel.com>,
Thomas Gleixner <tglx@...utronix.de>,
linux-ext4@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org, linux-nvdimm@...ts.01.org, x86@...nel.org,
xfs@....sgi.com, Andrew Morton <akpm@...ux-foundation.org>,
Matthew Wilcox <matthew.r.wilcox@...el.com>
Subject: [RFC 06/11] mm: add pgoff_mkclean()
Introduce pgoff_mkclean(), which is conceptually similar to page_mkclean()
except that it works in the absence of struct page and can also be used to
clean PMDs. This is needed for DAX's dirty page handling.
Signed-off-by: Ross Zwisler <ross.zwisler@...ux.intel.com>
---
include/linux/rmap.h | 5 +++++
mm/rmap.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 58 insertions(+)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 29446ae..627875f9 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -223,6 +223,11 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
int page_mkclean(struct page *);
/*
+ * Clean and write protect all PTEs and huge PMDs mapping the given pgoff.
+ */
+int pgoff_mkclean(pgoff_t, struct address_space *);
+
+/*
* called in munlock()/munmap() path to check for other vmas holding
* the page mlocked.
*/
diff --git a/mm/rmap.c b/mm/rmap.c
index f5b5c1f..0ce16ab 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -586,6 +586,22 @@ vma_address(struct page *page, struct vm_area_struct *vma)
return address;
}
+/*
+ * Return the user virtual address at which @vma maps file offset @pgoff.
+ * Counterpart of vma_address() for callers that have no struct page
+ * (e.g. DAX).  The caller must ensure @vma really maps @pgoff -- an
+ * offset outside the VMA trips the VM_BUG_ON_VMA().
+ */
+static inline unsigned long
+pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
+{
+	unsigned long address;
+
+	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
+	return address;
+}
+
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
static void percpu_flush_tlb_batch_pages(void *data)
{
@@ -1040,6 +1050,62 @@ int page_mkclean(struct page *page)
}
EXPORT_SYMBOL_GPL(page_mkclean);
+/*
+ * pgoff_mkclean - clean and write protect all mappings of a file offset
+ * @pgoff: page offset within @mapping to clean
+ * @mapping: address_space whose shared mappings are walked
+ *
+ * Like page_mkclean(), but keyed by (mapping, pgoff) so it works in the
+ * absence of a struct page, and it also handles huge PMD mappings.
+ * VMAs in which @pgoff is not currently mapped are skipped.
+ *
+ * Returns 0.
+ */
+int pgoff_mkclean(pgoff_t pgoff, struct address_space *mapping)
+{
+	struct vm_area_struct *vma;
+
+	i_mmap_lock_read(mapping);
+	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
+		struct mm_struct *mm = vma->vm_mm;
+		pmd_t pmd, *pmdp = NULL;
+		pte_t pte, *ptep = NULL;
+		unsigned long address;
+		spinlock_t *ptl;
+
+		address = pgoff_address(pgoff, vma);
+
+		/*
+		 * Not every VMA covering @pgoff has it populated.  Skip
+		 * such VMAs instead of aborting the walk, so the remaining
+		 * mappings still get cleaned.
+		 */
+		if (follow_pte_pmd(mm, address, &ptep, &pmdp, &ptl))
+			continue;
+
+		if (pmdp) {
+			flush_cache_page(vma, address, pmd_pfn(*pmdp));
+			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
+			pmd = pmd_wrprotect(pmd);
+			pmd = pmd_mkclean(pmd);
+			set_pmd_at(mm, address, pmdp, pmd);
+			spin_unlock(ptl);
+		} else {
+			BUG_ON(!ptep);
+			flush_cache_page(vma, address, pte_pfn(*ptep));
+			pte = ptep_clear_flush(vma, address, ptep);
+			pte = pte_wrprotect(pte);
+			pte = pte_mkclean(pte);
+			set_pte_at(mm, address, ptep, pte);
+			pte_unmap_unlock(ptep, ptl);
+		}
+	}
+	i_mmap_unlock_read(mapping);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pgoff_mkclean);
+
/**
* page_move_anon_rmap - move a page to our anon_vma
* @page: the page to move to our anon_vma
--
2.1.0
--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Powered by blists - more mailing lists