Date:   Wed, 24 May 2017 13:19:55 +0200
From:   Laurent Dufour <ldufour@...ux.vnet.ibm.com>
To:     linux-mm@...ck.org
Cc:     Davidlohr Bueso <dave@...olabs.net>, akpm@...ux-foundation.org,
        Jan Kara <jack@...e.cz>,
        "Kirill A . Shutemov" <kirill@...temov.name>,
        Michal Hocko <mhocko@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Mel Gorman <mgorman@...hsingularity.net>,
        Andi Kleen <andi@...stfloor.org>, haren@...ux.vnet.ibm.com,
        aneesh.kumar@...ux.vnet.ibm.com, khandual@...ux.vnet.ibm.com,
        paulmck@...ux.vnet.ibm.com, linux-kernel@...r.kernel.org
Subject: [RFC v2 04/10] mm: Handle range lock field when collapsing huge pages

When collapsing huge pages through the swap-in operation, a vm_fault
structure is built and passed to do_swap_page(). The new lockrange
field of the vm_fault structure must be set correctly when dealing
with the range lock.
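
The following is an illustrative sketch only, not part of this patch:
it shows the pattern the diff below relies on, i.e. an #ifdef'd
range-lock pointer in the fault descriptor that the caller fills in
via a designated initializer. Apart from the 'lockrange' member and
CONFIG_MEM_RANGE_LOCK, all names here are placeholders.

	/* Opaque here; defined elsewhere in the range lock series. */
	struct range_lock;

	struct vm_fault_sketch {
		unsigned long address;		/* faulting address */
	#ifdef CONFIG_MEM_RANGE_LOCK
		struct range_lock *lockrange;	/* range lock held by the caller */
	#endif
	};

	static void build_fault(unsigned long address, struct range_lock *range)
	{
		struct vm_fault_sketch vmf = {
			.address = address,
	#ifdef CONFIG_MEM_RANGE_LOCK
			.lockrange = range,	/* propagate the caller's range lock */
	#endif
		};

		(void)vmf;	/* the real code hands this off to do_swap_page() */
	}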

Signed-off-by: Laurent Dufour <ldufour@...ux.vnet.ibm.com>
---
 mm/khugepaged.c | 39 +++++++++++++++++++++++++++++++++------
 1 file changed, 33 insertions(+), 6 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 945fd1ca49b5..6357f32608a5 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -872,7 +872,11 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 					struct vm_area_struct *vma,
 					unsigned long address, pmd_t *pmd,
-					int referenced)
+					int referenced
+#ifdef CONFIG_MEM_RANGE_LOCK
+					, struct range_lock *range
+#endif
+	)
 {
 	int swapped_in = 0, ret = 0;
 	struct vm_fault vmf = {
@@ -881,6 +885,9 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 		.flags = FAULT_FLAG_ALLOW_RETRY,
 		.pmd = pmd,
 		.pgoff = linear_page_index(vma, address),
+#ifdef CONFIG_MEM_RANGE_LOCK
+		.lockrange = range,
+#endif
 	};
 
 	/* we only decide to swapin, if there is enough young ptes */
@@ -927,7 +934,11 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 static void collapse_huge_page(struct mm_struct *mm,
 				   unsigned long address,
 				   struct page **hpage,
-				   int node, int referenced)
+				   int node, int referenced
+#ifdef CONFIG_MEM_RANGE_LOCK
+				   , struct range_lock *range
+#endif
+				   )
 {
 	pmd_t *pmd, _pmd;
 	pte_t *pte;
@@ -985,7 +996,11 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * If it fails, we release mmap_sem and jump out_nolock.
 	 * Continuing to collapse causes inconsistency.
 	 */
-	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
+	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced
+#ifdef CONFIG_MEM_RANGE_LOCK
+					 , range
+#endif
+		    )) {
 		mem_cgroup_cancel_charge(new_page, memcg, true);
 		up_read(&mm->mmap_sem);
 		goto out_nolock;
@@ -1092,7 +1107,11 @@ static void collapse_huge_page(struct mm_struct *mm,
 static int khugepaged_scan_pmd(struct mm_struct *mm,
 			       struct vm_area_struct *vma,
 			       unsigned long address,
-			       struct page **hpage)
+			       struct page **hpage
+#ifdef CONFIG_MEM_RANGE_LOCK
+			       , struct range_lock *range
+#endif
+	)
 {
 	pmd_t *pmd;
 	pte_t *pte, *_pte;
@@ -1206,7 +1225,11 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 	if (ret) {
 		node = khugepaged_find_target_node();
 		/* collapse_huge_page will return with the mmap_sem released */
-		collapse_huge_page(mm, address, hpage, node, referenced);
+		collapse_huge_page(mm, address, hpage, node, referenced
+#ifdef CONFIG_MEM_RANGE_LOCK
+				   , range
+#endif
+			);
 	}
 out:
 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
@@ -1727,7 +1750,11 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 			} else {
 				ret = khugepaged_scan_pmd(mm, vma,
 						khugepaged_scan.address,
-						hpage);
+						hpage
+#ifdef CONFIG_MEM_RANGE_LOCK
+						, &range
+#endif
+						);
 			}
 			/* move to next address */
 			khugepaged_scan.address += HPAGE_PMD_SIZE;
-- 
2.7.4
