Message-Id: <20200928175428.4110504-24-zi.yan@sent.com>
Date: Mon, 28 Sep 2020 13:54:21 -0400
From: Zi Yan <zi.yan@...t.com>
To: linux-mm@...ck.org
Cc: "Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Roman Gushchin <guro@...com>, Rik van Riel <riel@...riel.com>,
Matthew Wilcox <willy@...radead.org>,
Shakeel Butt <shakeelb@...gle.com>,
Yang Shi <shy828301@...il.com>,
Jason Gunthorpe <jgg@...dia.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Michal Hocko <mhocko@...e.com>,
David Hildenbrand <david@...hat.com>,
William Kucharski <william.kucharski@...cle.com>,
Andrea Arcangeli <aarcange@...hat.com>,
John Hubbard <jhubbard@...dia.com>,
David Nellans <dnellans@...dia.com>,
linux-kernel@...r.kernel.org, Zi Yan <ziy@...dia.com>
Subject: [RFC PATCH v2 23/30] mm: add PUD THP support to pagemap
From: Zi Yan <ziy@...dia.com>
Add pagemap_pud_range() so that /proc/<pid>/pagemap reports PUD-mapped THP ranges with the proper page flags.
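For context only (not part of the patch), below is a minimal userspace sketch of how the entries filled in by this walker could be consumed. The bit layout follows Documentation/admin-guide/mm/pagemap.rst; the 1GB mapping size, the constant names, and the assumption that the range ends up PUD-mapped are illustrative, and reading non-zero PFNs requires CAP_SYS_ADMIN:

    /*
     * Illustrative sketch: fault in a 1GB anonymous mapping and decode the
     * pagemap entry for its first page.  Whether the range is actually
     * backed by a PUD THP depends on alignment and on the rest of this
     * series; the decode logic is the same either way.
     */
    #include <fcntl.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define PM_PFN_MASK       ((1ULL << 55) - 1)  /* bits 0-54: PFN */
    #define PM_SOFT_DIRTY     (1ULL << 55)
    #define PM_MMAP_EXCLUSIVE (1ULL << 56)
    #define PM_SWAP           (1ULL << 62)
    #define PM_PRESENT        (1ULL << 63)

    int main(void)
    {
    	size_t len = 1UL << 30;               /* 1GB, PUD_SIZE on x86_64 */
    	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
    			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    	if (buf == MAP_FAILED)
    		return 1;
    	memset(buf, 1, len);                  /* fault the range in */

    	int fd = open("/proc/self/pagemap", O_RDONLY);
    	if (fd < 0)
    		return 1;

    	uint64_t pme;
    	off_t off = ((uintptr_t)buf / sysconf(_SC_PAGESIZE)) * sizeof(pme);
    	if (pread(fd, &pme, sizeof(pme), off) != sizeof(pme))
    		return 1;

    	printf("present=%d exclusive=%d soft-dirty=%d pfn=0x%" PRIx64 "\n",
    	       !!(pme & PM_PRESENT), !!(pme & PM_MMAP_EXCLUSIVE),
    	       !!(pme & PM_SOFT_DIRTY),
    	       (pme & PM_PRESENT) ? (pme & PM_PFN_MASK) : 0);

    	close(fd);
    	munmap(buf, len);
    	return 0;
    }

With this patch, consecutive entries inside a present PUD THP share the same flags and report consecutive PFNs, mirroring what pagemap_pmd_range() already does for PMD THPs.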
Signed-off-by: Zi Yan <ziy@...dia.com>
---
fs/proc/task_mmu.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 63 insertions(+)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 077196182288..04a3158d0d5b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1553,6 +1553,68 @@ static int pagemap_pmd_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
return err;
}
+static int pagemap_pud_range(pud_t pud, pud_t *pudp, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ struct vm_area_struct *vma = walk->vma;
+ struct pagemapread *pm = walk->private;
+ spinlock_t *ptl;
+ int err = 0;
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ ptl = pud_trans_huge_lock(pudp, vma);
+ if (ptl) {
+ u64 flags = 0, frame = 0;
+ struct page *page = NULL;
+
+ if (memcmp(pudp, &pud, sizeof(pud)) != 0) {
+ walk->action = ACTION_AGAIN;
+ spin_unlock(ptl);
+ return 0;
+ }
+ if (vma->vm_flags & VM_SOFTDIRTY)
+ flags |= PM_SOFT_DIRTY;
+
+ if (pud_present(pud)) {
+ page = pud_page(pud);
+
+ flags |= PM_PRESENT;
+ if (pud_soft_dirty(pud))
+ flags |= PM_SOFT_DIRTY;
+ if (pm->show_pfn)
+ frame = pud_pfn(pud) +
+ ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+ }
+
+ if (page && page_mapcount(page) == 1)
+ flags |= PM_MMAP_EXCLUSIVE;
+
+ for (; addr != end; addr += PAGE_SIZE) {
+ pagemap_entry_t pme = make_pme(frame, flags);
+
+ err = add_to_pagemap(addr, &pme, pm);
+ if (err)
+ break;
+ if (pm->show_pfn) {
+ if (flags & PM_PRESENT)
+ frame++;
+ else if (flags & PM_SWAP)
+ frame += (1 << MAX_SWAPFILES_SHIFT);
+ }
+ }
+ spin_unlock(ptl);
+ walk->action = ACTION_CONTINUE;
+ return err;
+ }
+
+ if (pud_trans_unstable(&pud)) {
+ walk->action = ACTION_AGAIN;
+ return 0;
+ }
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+ return err;
+}
+
#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
@@ -1603,6 +1665,7 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
#endif /* HUGETLB_PAGE */
static const struct mm_walk_ops pagemap_ops = {
+ .pud_entry = pagemap_pud_range,
.pmd_entry = pagemap_pmd_range,
.pte_hole = pagemap_pte_hole,
.hugetlb_entry = pagemap_hugetlb_range,
--
2.28.0