Message-ID: <4B1CB5DD.8050803@ah.jp.nec.com>
Date: Mon, 07 Dec 2009 16:59:25 +0900
From: Naoya Horiguchi <n-horiguchi@...jp.nec.com>
To: LKML <linux-kernel@...r.kernel.org>
CC: ak@...ux.intel.com, Wu Fengguang <fengguang.wu@...el.com>,
linux-mm <linux-mm@...ck.org>
Subject: [PATCH 2/2] mm hugetlb: add hugepage support to pagemap
This patch enables extracting the pfn of a hugepage from
/proc/pid/pagemap in an architecture-independent manner.
Signed-off-by: Naoya Horiguchi <n-horiguchi@...jp.nec.com>
Cc: Andi Kleen <ak@...ux.intel.com>
Cc: Wu Fengguang <fengguang.wu@...el.com>
---
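For reviewers, here is a minimal userspace sketch (not part of the
patch) showing how the new entries can be consumed. It assumes a 2MB
default hugepage size, reserved hugepages (vm.nr_hugepages > 0), and
the MAP_HUGETLB mmap flag; on kernels without MAP_HUGETLB, mmap'ing a
file on hugetlbfs works the same way. The PM_* constants mirror the
pagemap encoding in fs/proc/task_mmu.c (bits 0-54: pfn, bits 55-60:
page shift, bit 63: present).

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#define PM_PRESENT     (1ULL << 63)        /* PM_STATUS(4LL) */
#define PM_PFRAME_MASK ((1ULL << 55) - 1)  /* bits 0-54 hold the pfn */

int main(void)
{
	size_t hpage = 2UL << 20;  /* assumed default hugepage size */
	long psize = sysconf(_SC_PAGESIZE);
	uint64_t ent;
	size_t off;
	char *p;
	int fd;

	p = mmap(NULL, hpage, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;  /* fault the hugepage in */

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		return 1;

	/* one 64-bit entry per 4kB virtual page, even inside a hugepage */
	for (off = 0; off < hpage; off += psize) {
		off_t idx = (uintptr_t)(p + off) / psize * sizeof(ent);

		if (pread(fd, &ent, sizeof(ent), idx) != sizeof(ent))
			break;
		if (ent & PM_PRESENT)
			printf("vaddr %p -> pfn 0x%llx\n", (void *)(p + off),
			       (unsigned long long)(ent & PM_PFRAME_MASK));
	}
	close(fd);
	return 0;
}

With this patch applied, every 4kB slot of the mapping reports a
present entry and the pfns increase by one per slot across the
hugepage; previously the walker skipped hugetlb vmas entirely, so no
usable entries were produced for such ranges.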
 fs/proc/task_mmu.c |   43 +++++++++++++++++++++++++++++++++++++++++++
 include/linux/mm.h |    3 +++
 mm/pagewalk.c      |   18 ++++++++++++++++--
3 files changed, 62 insertions(+), 2 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2a1bef9..5d8e86b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -650,6 +650,48 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
return err;
}
+static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
+{
+ u64 pme = 0;
+ if (pte_present(pte))
+ pme = PM_PFRAME(pte_pfn(pte) + offset)
+ | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
+ return pme;
+}
+
+static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ struct vm_area_struct *vma;
+ struct pagemapread *pm = walk->private;
+ struct hstate *hs = NULL;
+ int err = 0;
+
+ vma = find_vma(walk->mm, addr);
+ hs = hstate_vma(vma);
+ for (; addr != end; addr += PAGE_SIZE) {
+ u64 pfn = PM_NOT_PRESENT;
+
+ if (vma && (addr >= vma->vm_end)) {
+ vma = find_vma(walk->mm, addr);
+ hs = hstate_vma(vma);
+ }
+
+ if (vma && (vma->vm_start <= addr) && is_vm_hugetlb_page(vma)) {
+ /* calculate pfn of the "raw" page in the hugepage. */
+ int offset = (addr & ~huge_page_mask(hs)) >> PAGE_SHIFT;
+ pfn = huge_pte_to_pagemap_entry(*pte, offset);
+ }
+ err = add_to_pagemap(addr, pfn, pm);
+ if (err)
+ return err;
+ }
+
+ cond_resched();
+
+ return err;
+}
+
/*
* /proc/pid/pagemap - an array mapping virtual pages to pfns
*
@@ -742,6 +784,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
pagemap_walk.pmd_entry = pagemap_pte_range;
pagemap_walk.pte_hole = pagemap_pte_hole;
+ pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
pagemap_walk.mm = mm;
pagemap_walk.private = &pm;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4d33403..14835f0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -758,6 +758,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
* @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
* @pte_entry: if set, called for each non-empty PTE (4th-level) entry
* @pte_hole: if set, called for each hole at all levels
+ * @hugetlb_entry: if set, called for each hugetlb entry
*
* (see walk_page_range for more details)
*/
@@ -767,6 +768,8 @@ struct mm_walk {
int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+ int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
+ struct mm_walk *);
struct mm_struct *mm;
void *private;
};
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 3d88824..2d27a23 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -106,9 +106,11 @@ int walk_page_range(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
pgd_t *pgd;
+ pte_t *pte;
unsigned long next;
int err = 0;
struct vm_area_struct *vma;
+ struct hstate *hs;
if (addr >= end)
return err;
@@ -120,11 +122,23 @@ int walk_page_range(unsigned long addr, unsigned long end,
do {
next = pgd_addr_end(addr, end);
- /* skip hugetlb vma to avoid hugepage PMD being cleared
- * in pmd_none_or_clear_bad(). */
+ /*
+ * handle hugetlb vma individually because pagetable walk for
+ * the hugetlb page is dependent on the architecture and
+ * we can't handle it in the same manner as non-huge pages.
+ */
vma = find_vma(walk->mm, addr);
if (is_vm_hugetlb_page(vma)) {
next = (vma->vm_end < next) ? vma->vm_end : next;
+ hs = hstate_vma(vma);
+ pte = huge_pte_offset(walk->mm,
+ addr & huge_page_mask(hs));
+ if (pte && !huge_pte_none(huge_ptep_get(pte))
+ && walk->hugetlb_entry)
+ err = walk->hugetlb_entry(pte, addr,
+ next, walk);
+ if (err)
+ break;
continue;
}
--
1.6.0.6