Date:	Sun, 21 Oct 2012 08:20:13 -0700
From:	tip-bot for Rik van Riel <riel@...hat.com>
To:	linux-tip-commits@...r.kernel.org
Cc:	linux-kernel@...r.kernel.org, riel@...hat.com, hpa@...or.com,
	mingo@...nel.org, tglx@...utronix.de
Subject: [tip:numa/core] numa, mm:
  Rename the PROT_NONE fault handling functions to *_numa()

Commit-ID:  2458840fddea542391d343dac734d149607db709
Gitweb:     http://git.kernel.org/tip/2458840fddea542391d343dac734d149607db709
Author:     Rik van Riel <riel@...hat.com>
AuthorDate: Thu, 18 Oct 2012 17:20:21 -0400
Committer:  Ingo Molnar <mingo@...nel.org>
CommitDate: Sun, 21 Oct 2012 15:41:26 +0200

numa, mm: Rename the PROT_NONE fault handling functions to *_numa()

Having the function name indicate what the function is used
for makes the code a little easier to read.  Furthermore,
the fault handling code largely consists of do_...._page
functions.

Rename the NUMA working set sampling fault handling functions
to _numa() names, to indicate what they are used for.

This separates their naming from the regular PROT_NONE naming.

Signed-off-by: Rik van Riel <riel@...hat.com>
Cc: aarcange@...hat.com
Cc: a.p.zijlstra@...llo.nl
Link: http://lkml.kernel.org/r/20121018172021.0b1f6e3d@cuia.bos.redhat.com
[ Converted two more usage sites ]
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 include/linux/huge_mm.h |    8 ++++----
 mm/huge_memory.c        |    4 ++--
 mm/memory.c             |   22 ++++++++++++----------
 3 files changed, 18 insertions(+), 16 deletions(-)
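
[ Editorial note, not part of the patch: the renamed helpers keep their
  existing behaviour; an entry is treated as a NUMA hinting fault when
  its protection bits already match what vma_prot_none() would set for
  the VMA.  A minimal sketch of the pte-level check, assumed to mirror
  the pmd_numa() body visible in the mm/huge_memory.c hunk below: ]

	/*
	 * Sketch only -- not part of this patch.  A pte is a NUMA
	 * hinting ("special" PROT_NONE) entry when applying the VMA's
	 * PROT_NONE protection to it would change nothing, i.e. it
	 * already carries those bits for working set sampling.
	 */
	static bool pte_numa(struct vm_area_struct *vma, pte_t pte)
	{
		return pte_same(pte, pte_modify(pte, vma_prot_none(vma)));
	}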

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index bcbe467..4f0f948 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -160,9 +160,9 @@ static inline struct page *compound_trans_head(struct page *page)
 	return page;
 }
 
-extern bool pmd_prot_none(struct vm_area_struct *vma, pmd_t pmd);
+extern bool pmd_numa(struct vm_area_struct *vma, pmd_t pmd);
 
-extern void do_huge_pmd_prot_none(struct mm_struct *mm, struct vm_area_struct *vma,
+extern void do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				  unsigned long address, pmd_t *pmd,
 				  unsigned int flags, pmd_t orig_pmd);
 
@@ -203,12 +203,12 @@ static inline int pmd_trans_huge_lock(pmd_t *pmd,
 	return 0;
 }
 
-static inline bool pmd_prot_none(struct vm_area_struct *vma, pmd_t pmd)
+static inline bool pmd_numa(struct vm_area_struct *vma, pmd_t pmd)
 {
 	return false;
 }
 
-static inline void do_huge_pmd_prot_none(struct mm_struct *mm, struct vm_area_struct *vma,
+static inline void do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				  unsigned long address, pmd_t *pmd,
 				  unsigned int flags, pmd_t orig_pmd)
 {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c58a5f0..a8f6531 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -727,7 +727,7 @@ out:
 	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
 
-bool pmd_prot_none(struct vm_area_struct *vma, pmd_t pmd)
+bool pmd_numa(struct vm_area_struct *vma, pmd_t pmd)
 {
 	/*
 	 * See pte_prot_none().
@@ -738,7 +738,7 @@ bool pmd_prot_none(struct vm_area_struct *vma, pmd_t pmd)
 	return pmd_same(pmd, pmd_modify(pmd, vma_prot_none(vma)));
 }
 
-void do_huge_pmd_prot_none(struct mm_struct *mm, struct vm_area_struct *vma,
+void do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			   unsigned long address, pmd_t *pmd,
 			   unsigned int flags, pmd_t entry)
 {
diff --git a/mm/memory.c b/mm/memory.c
index 2cc8a29..23d4bd4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1471,11 +1471,13 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
-static bool pte_prot_none(struct vm_area_struct *vma, pte_t pte)
+static bool pte_numa(struct vm_area_struct *vma, pte_t pte)
 {
 	/*
-	 * If we have the normal vma->vm_page_prot protections we're not a
-	 * 'special' PROT_NONE page.
+	 * For NUMA page faults, we use PROT_NONE ptes in VMAs with
+	 * "normal" vma->vm_page_prot protections.  Genuine PROT_NONE
+	 * VMAs should never get here, because the fault handling code
+	 * will notice that the VMA has no read or write permissions.
 	 *
 	 * This means we cannot get 'special' PROT_NONE faults from genuine
 	 * PROT_NONE maps, nor from PROT_WRITE file maps that do dirty
@@ -1543,7 +1545,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
 		goto out;
 	}
-	if ((flags & FOLL_NUMA) && pmd_prot_none(vma, *pmd))
+	if ((flags & FOLL_NUMA) && pmd_numa(vma, *pmd))
 		goto no_page_table;
 	if (pmd_trans_huge(*pmd)) {
 		if (flags & FOLL_SPLIT) {
@@ -1574,7 +1576,7 @@ split_fallthrough:
 	pte = *ptep;
 	if (!pte_present(pte))
 		goto no_page;
-	if ((flags & FOLL_NUMA) && pte_prot_none(vma, pte))
+	if ((flags & FOLL_NUMA) && pte_numa(vma, pte))
 		goto no_page;
 	if ((flags & FOLL_WRITE) && !pte_write(pte))
 		goto unlock;
@@ -3476,7 +3478,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
-static int do_prot_none(struct mm_struct *mm, struct vm_area_struct *vma,
+static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, pte_t *ptep, pmd_t *pmd,
 			unsigned int flags, pte_t entry)
 {
@@ -3601,8 +3603,8 @@ int handle_pte_fault(struct mm_struct *mm,
 					pte, pmd, flags, entry);
 	}
 
-	if (pte_prot_none(vma, entry))
-		return do_prot_none(mm, vma, address, pte, pmd, flags, entry);
+	if (pte_numa(vma, entry))
+		return do_numa_page(mm, vma, address, pte, pmd, flags, entry);
 
 	ptl = pte_lockptr(mm, pmd);
 	spin_lock(ptl);
@@ -3672,8 +3674,8 @@ retry:
 
 		barrier();
 		if (pmd_trans_huge(orig_pmd) && !pmd_trans_splitting(orig_pmd)) {
-			if (pmd_prot_none(vma, orig_pmd)) {
-				do_huge_pmd_prot_none(mm, vma, address, pmd,
+			if (pmd_numa(vma, orig_pmd)) {
+				do_huge_pmd_numa_page(mm, vma, address, pmd,
 						      flags, orig_pmd);
 			}
 
--
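
[ Editorial note, not part of the message above: the caller-side effect
  of the rename can be read off the mm/memory.c hunks.  A condensed,
  hypothetical sketch of how handle_pte_fault() now reaches the NUMA
  handler; the _sketch name is illustrative only, and locking plus all
  other fault cases are omitted: ]

	/*
	 * Condensed illustration of the dispatch shown in the
	 * handle_pte_fault() hunk above; everything else is elided.
	 */
	static int handle_pte_fault_sketch(struct mm_struct *mm,
					   struct vm_area_struct *vma,
					   unsigned long address, pte_t *pte,
					   pmd_t *pmd, unsigned int flags)
	{
		pte_t entry = *pte;

		/*
		 * NUMA working set sampling fault: dispatched before the
		 * pte lock is taken, as in the hunk above.
		 */
		if (pte_numa(vma, entry))
			return do_numa_page(mm, vma, address, pte, pmd,
					    flags, entry);

		/* Regular fault handling continues unchanged. */
		return 0;
	}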
