Date:	Thu, 18 Oct 2012 10:04:41 -0700
From:	tip-bot for Peter Zijlstra <a.p.zijlstra@...llo.nl>
To:	linux-tip-commits@...r.kernel.org
Cc:	linux-kernel@...r.kernel.org, hpa@...or.com, mingo@...nel.org,
	a.p.zijlstra@...llo.nl, tglx@...utronix.de
Subject: [tip:numa/core] sched/numa/mm:
  Avoid pointless TLB invalidation from page-migration

Commit-ID:  5cc4a4cb0abc63699b6741d7737e07e49b502782
Gitweb:     http://git.kernel.org/tip/5cc4a4cb0abc63699b6741d7737e07e49b502782
Author:     Peter Zijlstra <a.p.zijlstra@...llo.nl>
AuthorDate: Thu, 11 Oct 2012 17:42:06 +0200
Committer:  Ingo Molnar <mingo@...nel.org>
CommitDate: Mon, 15 Oct 2012 13:56:50 +0200

sched/numa/mm: Avoid pointless TLB invalidation from page-migration

When we do migrate-on-fault, we have faulted on a PROT_NONE entry,
which is a !present entry. We then replace it with a regular (present)
entry and proceed with page-migration.

Page-migration in turn replaces this now-present entry with a
migration-PTE (which is again !present); a present -> !present
transition requires a TLB invalidate.

Instead, leave the PROT_NONE entry in place when we need to migrate,
such that the PROT_NONE -> migration-PTE transition is a !present ->
!present transition and doesn't require a TLB invalidate.
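
To see why only the old entry's presence matters, here is a toy
userspace sketch (plain C, not kernel code; struct toy_pte and
transition_needs_tlb_flush are made-up illustrative names): the TLB
only caches translations for present entries, so replacing an entry
needs an invalidate only when the *old* entry was present.

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy PTE: only presence matters for TLB-flush purposes. */
	struct toy_pte {
		bool present;
	};

	/*
	 * A flush is required only when the old entry was present,
	 * since only present entries can be cached in the TLB.
	 */
	static bool transition_needs_tlb_flush(struct toy_pte old)
	{
		return old.present;
	}

	int main(void)
	{
		struct toy_pte prot_none = { .present = false };
		struct toy_pte regular   = { .present = true  };

		/* Old flow: PROT_NONE -> regular -> migration-PTE */
		printf("regular -> migration-PTE: flush = %d\n",
		       transition_needs_tlb_flush(regular));   /* 1 */

		/* New flow: PROT_NONE -> migration-PTE directly */
		printf("PROT_NONE -> migration-PTE: flush = %d\n",
		       transition_needs_tlb_flush(prot_none)); /* 0 */

		return 0;
	}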

Signed-off-by: Peter Zijlstra <a.p.zijlstra@...llo.nl>
Link: http://lkml.kernel.org/n/tip-h1utred3dhv2ausjg1wqwuym@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 mm/memory.c |   63 +++++++++++++++++++++++++++++------------------------------
 1 file changed, 31 insertions(+), 32 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 9ada7ed..8b1ad86 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3442,38 +3442,12 @@ static bool pte_prot_none(struct vm_area_struct *vma, pte_t pte)
 	return pte_same(pte, pte_modify(pte, vma_prot_none(vma)));
 }
 
-#ifdef CONFIG_NUMA
-
-
-static void do_prot_none_numa(struct mm_struct *mm, struct vm_area_struct *vma,
-			      unsigned long address, struct page *page)
-{
-	int node, page_nid = page_to_nid(page);
-
-	task_numa_placement();
-
-	/*
-	 * For NUMA systems we use the special PROT_NONE maps to drive
-	 * lazy page migration, see MPOL_MF_LAZY and related.
-	 */
-	node = mpol_misplaced(page, vma, address);
-	if (node != -1 && !migrate_misplaced_page(mm, page, node))
-		page_nid = node;
-
-	task_numa_fault(page_nid);
-}
-#else
-static void do_prot_none_numa(struct mm_struct *mm, struct vm_area_struct *vma,
-			      unsigned long address, struct page *page)
-{
-}
-#endif /* CONFIG_NUMA */
-
 static int do_prot_none(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, pte_t *ptep, pmd_t *pmd,
 			unsigned int flags, pte_t entry)
 {
 	struct page *page = NULL;
+	int node, page_nid = -1;
 	spinlock_t *ptl;
 
 	ptl = pte_lockptr(mm, pmd);
@@ -3481,6 +3455,16 @@ static int do_prot_none(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(!pte_same(*ptep, entry)))
 		goto unlock;
 
+	page = vm_normal_page(vma, address, entry);
+	if (page) {
+		get_page(page);
+		page_nid = page_to_nid(page);
+		node = mpol_misplaced(page, vma, address);
+		if (node != -1)
+			goto migrate;
+	}
+
+fixup:
 	flush_cache_page(vma, address, pte_pfn(entry));
 
 	ptep_modify_prot_start(mm, address, ptep);
@@ -3489,17 +3473,32 @@ static int do_prot_none(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	update_mmu_cache(vma, address, ptep);
 
-	page = vm_normal_page(vma, address, entry);
-	if (page)
-		get_page(page);
-
 unlock:
 	pte_unmap_unlock(ptep, ptl);
+out:
 	if (page) {
-		do_prot_none_numa(mm, vma, address, page);
+		task_numa_fault(page_nid, 1);
 		put_page(page);
 	}
+
 	return 0;
+
+migrate:
+	pte_unmap_unlock(ptep, ptl);
+
+	if (!migrate_misplaced_page(mm, page, node)) {
+		page_nid = node;
+		goto out;
+	}
+
+	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+	if (!pte_same(*ptep, entry)) {
+		put_page(page);
+		page = NULL;
+		goto unlock;
+	}
+
+	goto fixup;
 }
 
 /*
--