Message-Id: <20190731154603.41797-15-steven.price@arm.com>
Date: Wed, 31 Jul 2019 16:45:55 +0100
From: Steven Price <steven.price@....com>
To: linux-mm@...ck.org
Cc: Steven Price <steven.price@....com>,
Andy Lutomirski <luto@...nel.org>,
Ard Biesheuvel <ard.biesheuvel@...aro.org>,
Arnd Bergmann <arnd@...db.de>, Borislav Petkov <bp@...en8.de>,
Catalin Marinas <catalin.marinas@....com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Ingo Molnar <mingo@...hat.com>,
James Morse <james.morse@....com>,
Jérôme Glisse <jglisse@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Thomas Gleixner <tglx@...utronix.de>,
Will Deacon <will@...nel.org>, x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
Mark Rutland <Mark.Rutland@....com>,
"Liang, Kan" <kan.liang@...ux.intel.com>,
Andrew Morton <akpm@...ux-foundation.org>
Subject: [PATCH v10 14/22] mm: pagewalk: Add 'depth' parameter to pte_hole
The pte_hole() callback is called at multiple levels of the page tables.
Code dumping the kernel page tables needs to know at what depth the
missing entry is. Add this as an extra parameter to pte_hole().
When the depth isn't known (e.g. processing a vma) then -1 is passed.
The depth that is reported is the actual level where the entry is
missing (ignoring any folding that is in place), i.e. any levels where
PTRS_PER_P?D is set to 1 are ignored.
Note that depth starts at 0 for a PGD so that PUD/PMD/PTE retain their
natural numbers as levels 2/3/4.
Signed-off-by: Steven Price <steven.price@....com>
---
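As an illustration (not part of the patch): a minimal pte_hole
callback making use of the new depth parameter could look like the
sketch below, assuming the walk_page_range() interface as it exists
in this series. The names example_pte_hole and example_show_holes
are hypothetical.

  #include <linux/mm.h>
  #include <linux/printk.h>

  /*
   * Sketch only, not part of this patch: report each hole together
   * with the level of the missing entry. With the numbering used
   * here, 0:PGD, 1:P4D, 2:PUD, 3:PMD, 4:PTE, and -1 when the depth
   * is not known (e.g. when a whole vma is skipped).
   */
  static int example_pte_hole(unsigned long addr, unsigned long next,
  			    int depth, struct mm_walk *walk)
  {
  	static const char * const level[] =
  		{ "pgd", "p4d", "pud", "pmd", "pte" };

  	pr_info("hole %#lx-%#lx at %s level\n", addr, next,
  		depth < 0 ? "unknown" : level[depth]);
  	return 0;
  }

  static void example_show_holes(struct mm_struct *mm,
  			       unsigned long start, unsigned long end)
  {
  	struct mm_walk walk = {
  		.pte_hole = example_pte_hole,
  		.mm = mm,
  	};

  	down_read(&mm->mmap_sem);
  	walk_page_range(start, end, &walk);
  	up_read(&mm->mmap_sem);
  }

Because of the folding handled by real_depth() below, a callback on
an architecture where e.g. p4d is folded never sees depth 1 for a
missing top-level entry; it sees depth 0 (pgd).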
fs/proc/task_mmu.c | 4 ++--
include/linux/mm.h | 6 ++++--
mm/hmm.c | 2 +-
mm/migrate.c | 1 +
mm/mincore.c | 1 +
mm/pagewalk.c | 31 +++++++++++++++++++++++++------
6 files changed, 34 insertions(+), 11 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 731642e0f5a0..b2f87fde69eb 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -504,7 +504,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
- struct mm_walk *walk)
+ __always_unused int depth, struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
@@ -1274,7 +1274,7 @@ static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
}
static int pagemap_pte_hole(unsigned long start, unsigned long end,
- struct mm_walk *walk)
+ __always_unused int depth, struct mm_walk *walk)
{
struct pagemapread *pm = walk->private;
unsigned long addr = start;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e2581ec5324e..6b2e6d65cb4c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1440,7 +1440,9 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
* pmd_trans_huge() pmds. They may simply choose to
* split_huge_page() instead of handling it explicitly.
* @pte_entry: if set, called for each non-empty PTE (lowest-level) entry
- * @pte_hole: if set, called for each hole at all levels
+ * @pte_hole: if set, called for each hole at all levels,
+ * depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD, 4:PTE
+ * any depths where PTRS_PER_P?D is equal to 1 are skipped
* @hugetlb_entry: if set, called for each hugetlb entry
* @test_walk: caller specific callback function to determine whether
* we walk over the current vma or not. Returning 0
@@ -1473,7 +1475,7 @@ struct mm_walk {
int (*pte_entry)(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk);
int (*pte_hole)(unsigned long addr, unsigned long next,
- struct mm_walk *walk);
+ int depth, struct mm_walk *walk);
int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long next,
struct mm_walk *walk);
diff --git a/mm/hmm.c b/mm/hmm.c
index e1eedef129cf..413944bb99dc 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -433,7 +433,7 @@ static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
}
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
- struct mm_walk *walk)
+ __always_unused int depth, struct mm_walk *walk)
{
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
diff --git a/mm/migrate.c b/mm/migrate.c
index 8992741f10aa..b92014ceb6dc 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2130,6 +2130,7 @@ struct migrate_vma {
static int migrate_vma_collect_hole(unsigned long start,
unsigned long end,
+ __always_unused int depth,
struct mm_walk *walk)
{
struct migrate_vma *migrate = walk->private;
diff --git a/mm/mincore.c b/mm/mincore.c
index 4fe91d497436..8ba0fd80d449 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -112,6 +112,7 @@ static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
}
static int mincore_unmapped_range(unsigned long addr, unsigned long end,
+ __always_unused int depth,
struct mm_walk *walk)
{
walk->private += __mincore_unmapped_range(addr, end,
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 6bea79b95be3..cecc91259707 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -4,6 +4,22 @@
#include <linux/sched.h>
#include <linux/hugetlb.h>
+/*
+ * We want to know the real level where an entry is located ignoring any
+ * folding of levels which may be happening. For example if p4d is folded then
+ * a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
+ */
+static int real_depth(int depth)
+{
+ if (depth == 3 && PTRS_PER_PMD == 1)
+ depth = 2;
+ if (depth == 2 && PTRS_PER_PUD == 1)
+ depth = 1;
+ if (depth == 1 && PTRS_PER_P4D == 1)
+ depth = 0;
+ return depth;
+}
+
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
@@ -31,6 +47,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
pmd_t *pmd;
unsigned long next;
int err = 0;
+ int depth = real_depth(3);
if (walk->test_pmd) {
err = walk->test_pmd(addr, end, pmd_offset(pud, 0UL), walk);
@@ -46,7 +63,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd)) {
if (walk->pte_hole)
- err = walk->pte_hole(addr, next, walk);
+ err = walk->pte_hole(addr, next, depth, walk);
if (err)
break;
continue;
@@ -89,6 +106,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
pud_t *pud;
unsigned long next;
int err = 0;
+ int depth = real_depth(2);
if (walk->test_pud) {
err = walk->test_pud(addr, end, pud_offset(p4d, 0UL), walk);
@@ -104,7 +122,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
next = pud_addr_end(addr, end);
if (pud_none(*pud)) {
if (walk->pte_hole)
- err = walk->pte_hole(addr, next, walk);
+ err = walk->pte_hole(addr, next, depth, walk);
if (err)
break;
continue;
@@ -139,6 +157,7 @@ static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
p4d_t *p4d;
unsigned long next;
int err = 0;
+ int depth = real_depth(1);
if (walk->test_p4d) {
err = walk->test_p4d(addr, end, p4d_offset(pgd, 0UL), walk);
@@ -153,7 +172,7 @@ static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
next = p4d_addr_end(addr, end);
if (p4d_none_or_clear_bad(p4d)) {
if (walk->pte_hole)
- err = walk->pte_hole(addr, next, walk);
+ err = walk->pte_hole(addr, next, depth, walk);
if (err)
break;
continue;
@@ -184,7 +203,7 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd)) {
if (walk->pte_hole)
- err = walk->pte_hole(addr, next, walk);
+ err = walk->pte_hole(addr, next, 0, walk);
if (err)
break;
continue;
@@ -230,7 +249,7 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
if (pte)
err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
else if (walk->pte_hole)
- err = walk->pte_hole(addr, next, walk);
+ err = walk->pte_hole(addr, next, -1, walk);
if (err)
break;
@@ -273,7 +292,7 @@ static int walk_page_test(unsigned long start, unsigned long end,
if (vma->vm_flags & VM_PFNMAP) {
int err = 1;
if (walk->pte_hole)
- err = walk->pte_hole(start, end, walk);
+ err = walk->pte_hole(start, end, -1, walk);
return err ? err : 1;
}
return 0;
--
2.20.1