[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20241116175922.3265872-3-pasha.tatashin@soleen.com>
Date: Sat, 16 Nov 2024 17:59:18 +0000
From: Pasha Tatashin <pasha.tatashin@...een.com>
To: pasha.tatashin@...een.com,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
linux-doc@...r.kernel.org,
linux-fsdevel@...r.kernel.org,
cgroups@...r.kernel.org,
linux-kselftest@...r.kernel.org,
akpm@...ux-foundation.org,
corbet@....net,
derek.kiernan@....com,
dragan.cvetic@....com,
arnd@...db.de,
gregkh@...uxfoundation.org,
viro@...iv.linux.org.uk,
brauner@...nel.org,
jack@...e.cz,
tj@...nel.org,
hannes@...xchg.org,
mhocko@...nel.org,
roman.gushchin@...ux.dev,
shakeel.butt@...ux.dev,
muchun.song@...ux.dev,
Liam.Howlett@...cle.com,
lorenzo.stoakes@...cle.com,
vbabka@...e.cz,
jannh@...gle.com,
shuah@...nel.org,
vegard.nossum@...cle.com,
vattunuru@...vell.com,
schalla@...vell.com,
david@...hat.com,
willy@...radead.org,
osalvador@...e.de,
usama.anjum@...labora.com,
andrii@...nel.org,
ryan.roberts@....com,
peterx@...hat.com,
oleg@...hat.com,
tandersen@...flix.com,
rientjes@...gle.com,
gthelen@...gle.com
Subject: [RFCv1 2/6] pagewalk: Add a page table walker for init_mm page table
Page Detective will use it to walk the kernel page table. Make this
function accessible from modules, and also while here make
walk_page_range() accessible from modules, so Page Detective could
use it to walk user page tables.
Signed-off-by: Pasha Tatashin <pasha.tatashin@...een.com>
---
include/linux/pagewalk.h | 2 ++
mm/pagewalk.c | 32 ++++++++++++++++++++++++++++++++
2 files changed, 34 insertions(+)
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index f5eb5a32aeed..ff25374470f0 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -124,6 +124,8 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private);
+int walk_page_range_kernel(unsigned long start, unsigned long end,
+ const struct mm_walk_ops *ops, void *private);
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
void *private);
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 5f9f01532e67..050790aeb15f 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -478,6 +478,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
} while (start = next, start < end);
return err;
}
+EXPORT_SYMBOL_GPL(walk_page_range);
/**
* walk_page_range_novma - walk a range of pagetables not backed by a vma
@@ -541,6 +542,37 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
return walk_pgd_range(start, end, &walk);
}
+/**
+ * walk_page_range_kernel - walk a range of pagetables of kernel/init_mm
+ * @start: start address of the virtual address range
+ * @end: end address of the virtual address range
+ * @ops: operation to call during the walk
+ * @private: private data for callbacks' usage
+ *
+ * Similar to walk_page_range_novma() but specifically walks the init_mm.pgd
+ * table.
+ *
+ * Note: This function takes two locks: get_online_mems() and init_mm's
+ * mmap_read_lock; together they prevent the kernel page tables from being
+ * freed while walking.
+ *
+ * Returns 0 on success, -EAGAIN if the lock could not be taken, or the
+ * error returned by the walk callbacks.
+ */
+int walk_page_range_kernel(unsigned long start, unsigned long end,
+		const struct mm_walk_ops *ops, void *private)
+{
+	int err;
+
+	get_online_mems();
+	if (mmap_read_lock_killable(&init_mm)) {
+		put_online_mems();
+		return -EAGAIN;
+	}
+	/* Propagate the walk's result instead of discarding it. */
+	err = walk_page_range_novma(&init_mm, start, end, ops,
+				    init_mm.pgd, private);
+	mmap_read_unlock(&init_mm);
+	put_online_mems();
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(walk_page_range_kernel);
+
int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private)
--
2.47.0.338.g60cca15819-goog
Powered by blists - more mailing lists