Message-Id: <20140602213657.A393F169@viggo.jf.intel.com>
Date:	Mon, 02 Jun 2014 14:36:57 -0700
From:	Dave Hansen <dave@...1.net>
To:	linux-kernel@...r.kernel.org
Cc:	linux-mm@...ck.org, kirill.shutemov@...ux.intel.com,
	Dave Hansen <dave@...1.net>
Subject: [PATCH 10/10] mm: pagewalk: use locked walker for /proc/pid/numa_maps


From: Dave Hansen <dave.hansen@...ux.intel.com>

Same deal as the previous patch: convert /proc/<pid>/numa_maps over to
the new locked walker function.  Lots of code savings, since the
open-coded THP locking and the pte_offset_map_lock() loop go away.
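
For reference, the walker-side counterpart (added by the earlier patches
in this series, not shown here) is assumed to look roughly like the
open-coded locking this patch removes, hoisted into the core walk code.
A sketch only; the 'err'/'end' handling is illustrative:

	/*
	 * Sketch of the assumed pmd-level dispatch for
	 * ->locked_single_entry; the real implementation lives in the
	 * earlier patches of this series.
	 */
	if (pmd_trans_huge_lock(pmd, walk->vma, &ptl) == 1) {
		/* one huge entry, covered by the PMD lock */
		err = walk->locked_single_entry((pte_t *)pmd, addr,
						HPAGE_PMD_SIZE, walk);
		spin_unlock(ptl);
		return err;
	}
	if (pmd_trans_unstable(pmd))
		return 0;
	/* normal PTEs, covered by the PTE lock for the whole range */
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		err = walk->locked_single_entry(pte, addr, PAGE_SIZE, walk);
	} while (pte++, addr += PAGE_SIZE, !err && addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return err;

Either way the callback runs with the right page table lock held and a
'size' of PAGE_SIZE or HPAGE_PMD_SIZE, so the conversion below only has
to do size/PAGE_SIZE to get the page count for gather_stats().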


Signed-off-by: Dave Hansen <dave.hansen@...ux.intel.com>
---

 b/fs/proc/task_mmu.c |   39 ++++++++-------------------------------
 1 file changed, 8 insertions(+), 31 deletions(-)

diff -puN fs/proc/task_mmu.c~mm-pagewalk-use-locked-walker-numa_maps fs/proc/task_mmu.c
--- a/fs/proc/task_mmu.c~mm-pagewalk-use-locked-walker-numa_maps	2014-06-02 14:20:21.518907178 -0700
+++ b/fs/proc/task_mmu.c	2014-06-02 14:20:21.522907359 -0700
@@ -1280,41 +1280,18 @@ static struct page *can_gather_numa_stat
 	return page;
 }
 
-static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
-		unsigned long end, struct mm_walk *walk)
+static int gather_stats_locked(pte_t *pte, unsigned long addr,
+		unsigned long size, struct mm_walk *walk)
 {
-	struct numa_maps *md;
-	spinlock_t *ptl;
-	pte_t *orig_pte;
-	pte_t *pte;
-
-	md = walk->private;
+	struct numa_maps *md = walk->private;
+	struct page *page = can_gather_numa_stats(*pte, walk->vma, addr);
 
-	if (pmd_trans_huge_lock(pmd, walk->vma, &ptl) == 1) {
-		pte_t huge_pte = *(pte_t *)pmd;
-		struct page *page;
-
-		page = can_gather_numa_stats(huge_pte, walk->vma, addr);
-		if (page)
-			gather_stats(page, md, pte_dirty(huge_pte),
-				     HPAGE_PMD_SIZE/PAGE_SIZE);
-		spin_unlock(ptl);
-		return 0;
-	}
+	if (page)
+		gather_stats(page, md, pte_dirty(*pte), size/PAGE_SIZE);
 
-	if (pmd_trans_unstable(pmd))
-		return 0;
-	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-	do {
-		struct page *page = can_gather_numa_stats(*pte, walk->vma, addr);
-		if (!page)
-			continue;
-		gather_stats(page, md, pte_dirty(*pte), 1);
-
-	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap_unlock(orig_pte, ptl);
 	return 0;
 }
+
 #ifdef CONFIG_HUGETLB_PAGE
 static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
 		unsigned long addr, unsigned long end, struct mm_walk *walk)
@@ -1366,7 +1343,7 @@ static int show_numa_map(struct seq_file
 	memset(md, 0, sizeof(*md));
 
 	walk.hugetlb_entry = gather_hugetbl_stats;
-	walk.pmd_entry = gather_pte_stats;
+	walk.locked_single_entry = gather_stats_locked;
 	walk.private = md;
 	walk.mm = mm;
 
_
