Message-ID: <20190723233016.26403-3-rcampbell@nvidia.com>
Date: Tue, 23 Jul 2019 16:30:16 -0700
From: Ralph Campbell <rcampbell@...dia.com>
To: <linux-mm@...ck.org>
CC: <linux-kernel@...r.kernel.org>,
Ralph Campbell <rcampbell@...dia.com>,
Jérôme Glisse <jglisse@...hat.com>,
Jason Gunthorpe <jgg@...lanox.com>,
Christoph Hellwig <hch@....de>
Subject: [PATCH 2/2] mm/hmm: make full use of walk_page_range()
hmm_range_snapshot() and hmm_range_fault() both call find_vma() and
walk_page_range() in a loop. This is unnecessary duplication since
walk_page_range() calls find_vma() in a loop already.
Simplify hmm_range_snapshot() and hmm_range_fault() by defining a
test_walk() callback function, hmm_vma_walk_test(), to filter unhandled vmas.
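For reference, here is a minimal sketch of the test_walk contract this
patch relies on (the function names below are hypothetical, not part of
this patch): walk_page_range() iterates find_vma() internally and invokes
test_walk for each vma it finds; a negative return aborts the walk and is
propagated to the caller, a positive return skips the current vma, and
zero walks it with the remaining callbacks.

    #include <linux/mm.h>

    /* Hypothetical test_walk callback, for illustration only. */
    static int example_test_walk(unsigned long start, unsigned long end,
                                 struct mm_walk *walk)
    {
            /* Reject vmas we cannot handle; the error is returned
             * by walk_page_range() itself.
             */
            if (walk->vma->vm_flags & VM_PFNMAP)
                    return -EFAULT;
            return 0;
    }

    static int example_walk(struct mm_struct *mm, unsigned long start,
                            unsigned long end)
    {
            struct mm_walk mm_walk = {
                    .mm        = mm,
                    .test_walk = example_test_walk,
                    /* .pmd_entry, .pte_hole, etc. set as in the
                     * patch below.
                     */
            };

            /* One call covers every vma in [start, end). */
            return walk_page_range(start, end, &mm_walk);
    }

With this shape, a vma-level error ends the whole walk immediately, which
is what lets hmm_range_snapshot() and hmm_range_fault() drop their own
find_vma() loops.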
Signed-off-by: Ralph Campbell <rcampbell@...dia.com>
Cc: "Jérôme Glisse" <jglisse@...hat.com>
Cc: Jason Gunthorpe <jgg@...lanox.com>
Cc: Christoph Hellwig <hch@....de>
---
mm/hmm.c | 197 ++++++++++++++++++++-----------------------------------
1 file changed, 70 insertions(+), 127 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index 8271f110c243..218557451070 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -839,13 +839,40 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
#endif
}
-static void hmm_pfns_clear(struct hmm_range *range,
- uint64_t *pfns,
- unsigned long addr,
- unsigned long end)
+static int hmm_vma_walk_test(unsigned long start,
+ unsigned long end,
+ struct mm_walk *walk)
{
- for (; addr < end; addr += PAGE_SIZE, pfns++)
- *pfns = range->values[HMM_PFN_NONE];
+ const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
+ struct hmm_vma_walk *hmm_vma_walk = walk->private;
+ struct hmm_range *range = hmm_vma_walk->range;
+ struct vm_area_struct *vma = walk->vma;
+
+ /* If range is no longer valid, force retry. */
+ if (!range->valid)
+ return -EBUSY;
+
+ if (vma->vm_flags & device_vma)
+ return -EFAULT;
+
+ if (is_vm_hugetlb_page(vma)) {
+ if (huge_page_shift(hstate_vma(vma)) != range->page_shift &&
+ range->page_shift != PAGE_SHIFT)
+ return -EINVAL;
+ } else {
+ if (range->page_shift != PAGE_SHIFT)
+ return -EINVAL;
+ }
+
+ /*
+ * If vma does not allow read access, then assume that it does not
+ * allow write access, either. HMM does not support architectures
+ * that allow write without read.
+ */
+ if (!(vma->vm_flags & VM_READ))
+ return -EPERM;
+
+ return 0;
}
/*
@@ -949,65 +976,28 @@ EXPORT_SYMBOL(hmm_range_unregister);
*/
long hmm_range_snapshot(struct hmm_range *range)
{
- const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
- unsigned long start = range->start, end;
- struct hmm_vma_walk hmm_vma_walk;
+ unsigned long start = range->start;
+ struct hmm_vma_walk hmm_vma_walk = {};
struct hmm *hmm = range->hmm;
- struct vm_area_struct *vma;
- struct mm_walk mm_walk;
+ struct mm_walk mm_walk = {};
+ int ret;
lockdep_assert_held(&hmm->mm->mmap_sem);
- do {
- /* If range is no longer valid force retry. */
- if (!range->valid)
- return -EBUSY;
- vma = find_vma(hmm->mm, start);
- if (vma == NULL || (vma->vm_flags & device_vma))
- return -EFAULT;
-
- if (is_vm_hugetlb_page(vma)) {
- if (huge_page_shift(hstate_vma(vma)) !=
- range->page_shift &&
- range->page_shift != PAGE_SHIFT)
- return -EINVAL;
- } else {
- if (range->page_shift != PAGE_SHIFT)
- return -EINVAL;
- }
+ hmm_vma_walk.range = range;
+ hmm_vma_walk.last = start;
+ mm_walk.private = &hmm_vma_walk;
- if (!(vma->vm_flags & VM_READ)) {
- /*
- * If vma do not allow read access, then assume that it
- * does not allow write access, either. HMM does not
- * support architecture that allow write without read.
- */
- hmm_pfns_clear(range, range->pfns,
- range->start, range->end);
- return -EPERM;
- }
+ mm_walk.mm = hmm->mm;
+ mm_walk.pud_entry = hmm_vma_walk_pud;
+ mm_walk.pmd_entry = hmm_vma_walk_pmd;
+ mm_walk.pte_hole = hmm_vma_walk_hole;
+ mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;
+ mm_walk.test_walk = hmm_vma_walk_test;
- range->vma = vma;
- hmm_vma_walk.pgmap = NULL;
- hmm_vma_walk.last = start;
- hmm_vma_walk.fault = false;
- hmm_vma_walk.range = range;
- mm_walk.private = &hmm_vma_walk;
- end = min(range->end, vma->vm_end);
-
- mm_walk.vma = vma;
- mm_walk.mm = vma->vm_mm;
- mm_walk.pte_entry = NULL;
- mm_walk.test_walk = NULL;
- mm_walk.hugetlb_entry = NULL;
- mm_walk.pud_entry = hmm_vma_walk_pud;
- mm_walk.pmd_entry = hmm_vma_walk_pmd;
- mm_walk.pte_hole = hmm_vma_walk_hole;
- mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;
-
- walk_page_range(start, end, &mm_walk);
- start = end;
- } while (start < range->end);
+ ret = walk_page_range(start, range->end, &mm_walk);
+ if (ret)
+ return ret;
return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
@@ -1043,83 +1033,36 @@ EXPORT_SYMBOL(hmm_range_snapshot);
*/
long hmm_range_fault(struct hmm_range *range, bool block)
{
- const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
- unsigned long start = range->start, end;
- struct hmm_vma_walk hmm_vma_walk;
+ unsigned long start = range->start;
+ struct hmm_vma_walk hmm_vma_walk = {};
struct hmm *hmm = range->hmm;
- struct vm_area_struct *vma;
- struct mm_walk mm_walk;
+ struct mm_walk mm_walk = {};
int ret;
lockdep_assert_held(&hmm->mm->mmap_sem);
- do {
- /* If range is no longer valid force retry. */
- if (!range->valid)
- return -EBUSY;
+ hmm_vma_walk.range = range;
+ hmm_vma_walk.last = start;
+ hmm_vma_walk.fault = true;
+ hmm_vma_walk.block = block;
+ mm_walk.private = &hmm_vma_walk;
- vma = find_vma(hmm->mm, start);
- if (vma == NULL || (vma->vm_flags & device_vma))
- return -EFAULT;
-
- if (is_vm_hugetlb_page(vma)) {
- if (huge_page_shift(hstate_vma(vma)) !=
- range->page_shift &&
- range->page_shift != PAGE_SHIFT)
- return -EINVAL;
- } else {
- if (range->page_shift != PAGE_SHIFT)
- return -EINVAL;
- }
+ mm_walk.mm = hmm->mm;
+ mm_walk.pud_entry = hmm_vma_walk_pud;
+ mm_walk.pmd_entry = hmm_vma_walk_pmd;
+ mm_walk.pte_hole = hmm_vma_walk_hole;
+ mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;
+ mm_walk.test_walk = hmm_vma_walk_test;
- if (!(vma->vm_flags & VM_READ)) {
- /*
- * If vma do not allow read access, then assume that it
- * does not allow write access, either. HMM does not
- * support architecture that allow write without read.
- */
- hmm_pfns_clear(range, range->pfns,
- range->start, range->end);
- return -EPERM;
- }
+ do {
+ ret = walk_page_range(start, range->end, &mm_walk);
+ start = hmm_vma_walk.last;
- range->vma = vma;
- hmm_vma_walk.pgmap = NULL;
- hmm_vma_walk.last = start;
- hmm_vma_walk.fault = true;
- hmm_vma_walk.block = block;
- hmm_vma_walk.range = range;
- mm_walk.private = &hmm_vma_walk;
- end = min(range->end, vma->vm_end);
-
- mm_walk.vma = vma;
- mm_walk.mm = vma->vm_mm;
- mm_walk.pte_entry = NULL;
- mm_walk.test_walk = NULL;
- mm_walk.hugetlb_entry = NULL;
- mm_walk.pud_entry = hmm_vma_walk_pud;
- mm_walk.pmd_entry = hmm_vma_walk_pmd;
- mm_walk.pte_hole = hmm_vma_walk_hole;
- mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;
-
- do {
- ret = walk_page_range(start, end, &mm_walk);
- start = hmm_vma_walk.last;
-
- /* Keep trying while the range is valid. */
- } while (ret == -EBUSY && range->valid);
-
- if (ret) {
- unsigned long i;
-
- i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
- hmm_pfns_clear(range, &range->pfns[i],
- hmm_vma_walk.last, range->end);
- return ret;
- }
- start = end;
+ /* Keep trying while the range is valid. */
+ } while (ret == -EBUSY && range->valid);
- } while (start < range->end);
+ if (ret)
+ return ret;
return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
--
2.20.1