Message-Id: <20220930141931.174362-6-david@redhat.com>
Date: Fri, 30 Sep 2022 16:19:29 +0200
From: David Hildenbrand <david@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, linux-kselftest@...r.kernel.org,
David Hildenbrand <david@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Shuah Khan <shuah@...nel.org>, Hugh Dickins <hughd@...gle.com>,
Vlastimil Babka <vbabka@...e.cz>, Peter Xu <peterx@...hat.com>,
Andrea Arcangeli <aarcange@...hat.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Jason Gunthorpe <jgg@...dia.com>,
John Hubbard <jhubbard@...dia.com>
Subject: [PATCH v1 5/7] mm/pagewalk: add walk_page_range_vma()
Let's add walk_page_range_vma(), which is similar to walk_page_vma();
however, it walks only a given subset of the VMA range.

To be used in KSM code to stop using follow_page() next.
Signed-off-by: David Hildenbrand <david@...hat.com>
---
include/linux/pagewalk.h | 3 +++
mm/pagewalk.c | 27 +++++++++++++++++++++++++++
2 files changed, 30 insertions(+)
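
Just to illustrate the intended calling convention (not part of this
patch), here is a minimal sketch of a possible caller; example_pte_entry,
example_ops and example_page_mapped are made-up names, the actual KSM
conversion follows later in this series:

#include <linux/mm.h>
#include <linux/pagewalk.h>

/* Hypothetical callback, invoked for each PTE in the requested range. */
static int example_pte_entry(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	bool *mapped = walk->private;

	/* Record whether at least one PTE in the range is present. */
	if (pte_present(*pte))
		*mapped = true;
	return 0;
}

static const struct mm_walk_ops example_ops = {
	.pte_entry	= example_pte_entry,
};

/* Check a single page of @vma at @addr; the mmap lock must be held. */
static bool example_page_mapped(struct vm_area_struct *vma,
				unsigned long addr)
{
	bool mapped = false;

	mmap_assert_locked(vma->vm_mm);
	walk_page_range_vma(vma, addr, addr + PAGE_SIZE, &example_ops,
			    &mapped);
	return mapped;
}

As with walk_page_vma(), a positive return value from walk_page_test()
(for example, a test_walk callback asking to skip the VMA) turns the
walk into a no-op and walk_page_range_vma() returns 0.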
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index f3fafb731ffd..2f8f6cc980b4 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -99,6 +99,9 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
pgd_t *pgd,
void *private);
+int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ void *private);
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
void *private);
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 131b2b335b2c..757c075da231 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -514,6 +514,33 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
return __walk_page_range(start, end, &walk);
}
+int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ void *private)
+{
+ struct mm_walk walk = {
+ .ops = ops,
+ .mm = vma->vm_mm,
+ .vma = vma,
+ .private = private,
+ };
+ int err;
+
+ if (start >= end || !walk.mm)
+ return -EINVAL;
+ if (start < vma->vm_start || end > vma->vm_end)
+ return -EINVAL;
+
+ mmap_assert_locked(walk.mm);
+
+ err = walk_page_test(start, end, &walk);
+ if (err > 0)
+ return 0;
+ if (err < 0)
+ return err;
+ return __walk_page_range(start, end, &walk);
+}
+
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
void *private)
{
--
2.37.3