Message-Id: <1459213970-17957-3-git-send-email-mike.kravetz@oracle.com>
Date:	Mon, 28 Mar 2016 18:12:50 -0700
From:	Mike Kravetz <mike.kravetz@...cle.com>
To:	linux-mm@...ck.org, linux-kernel@...r.kernel.org, x86@...nel.org
Cc:	Hugh Dickins <hughd@...gle.com>,
	Naoya Horiguchi <n-horiguchi@...jp.nec.com>,
	Hillf Danton <hillf.zj@...baba-inc.com>,
	"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
	David Rientjes <rientjes@...gle.com>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>,
	"H. Peter Anvin" <hpa@...or.com>,
	Catalin Marinas <catalin.marinas@....com>,
	Will Deacon <will.deacon@....com>,
	Steve Capper <steve.capper@...aro.org>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Mike Kravetz <mike.kravetz@...cle.com>
Subject: [RFC PATCH 2/2] x86/hugetlb: Attempt PUD_SIZE mapping alignment if PMD sharing enabled

When creating a hugetlb mapping, attempt PUD_SIZE alignment if the
following conditions are met:
- The address passed to mmap or shmat is NULL
- The mapping is flagged as shared
- The mapping is at least PUD_SIZE in length
If a PUD_SIZE-aligned mapping cannot be created, fall back to a
huge page size mapping.
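
For illustration only, a userspace mapping that satisfies all three
conditions could be created roughly as follows.  This is a minimal
sketch, not part of the patch; it assumes x86_64 with a 1GB PUD_SIZE
and that enough huge pages have been reserved via vm.nr_hugepages:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#define PUD_SIZE (1UL << 30)	/* assumed: 1GB on x86_64 */

int main(void)
{
	/*
	 * NULL address, MAP_SHARED, and a length >= PUD_SIZE, so with
	 * this patch the kernel may pick a PUD_SIZE-aligned address.
	 * Requires reserved huge pages (vm.nr_hugepages) to succeed.
	 */
	size_t len = 2 * PUD_SIZE;
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("mapped at %p (PUD aligned: %s)\n", addr,
	       ((unsigned long)addr & (PUD_SIZE - 1)) ? "no" : "yes");
	munmap(addr, len);
	return 0;
}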

Signed-off-by: Mike Kravetz <mike.kravetz@...cle.com>
---
 arch/x86/mm/hugetlbpage.c | 64 ++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 61 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 42982b2..4f53af5 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -78,14 +78,39 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 {
 	struct hstate *h = hstate_file(file);
 	struct vm_unmapped_area_info info;
+	bool pud_size_align = false;
+	unsigned long ret_addr;
+
+	/*
+	 * If PMD sharing is enabled, align to PUD_SIZE to facilitate
+	 * sharing.  Only attempt alignment if no address was passed in,
+	 * flags indicate sharing and size is big enough.
+	 */
+	if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
+	    !addr && flags & MAP_SHARED && len >= PUD_SIZE)
+		pud_size_align = true;
 
 	info.flags = 0;
 	info.length = len;
 	info.low_limit = current->mm->mmap_legacy_base;
 	info.high_limit = TASK_SIZE;
-	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	if (pud_size_align)
+		info.align_mask = PAGE_MASK & (PUD_SIZE - 1);
+	else
+		info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
-	return vm_unmapped_area(&info);
+	ret_addr = vm_unmapped_area(&info);
+
+	/*
+	 * If failed with PUD_SIZE alignment, try again with huge page
+	 * size alignment.
+	 */
+	if ((ret_addr & ~PAGE_MASK) && pud_size_align) {
+		info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+		ret_addr = vm_unmapped_area(&info);
+	}
+
+	return ret_addr;
 }
 
 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
@@ -95,16 +120,38 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 	struct hstate *h = hstate_file(file);
 	struct vm_unmapped_area_info info;
 	unsigned long addr;
+	bool pud_size_align = false;
+
+	/*
+	 * If PMD sharing is enabled, align to PUD_SIZE to facilitate
+	 * sharing.  Only attempt alignment if no address was passed in,
+	 * flags indicate sharing and size is big enough.
+	 */
+	if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
+	    !addr0 && flags & MAP_SHARED && len >= PUD_SIZE)
+		pud_size_align = true;
 
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = current->mm->mmap_base;
-	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	if (pud_size_align)
+		info.align_mask = PAGE_MASK & (PUD_SIZE - 1);
+	else
+		info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
 	addr = vm_unmapped_area(&info);
 
 	/*
+	 * If failed with PUD_SIZE alignment, try again with huge page
+	 * size alignment.
+	 */
+	if ((addr & ~PAGE_MASK) && pud_size_align) {
+		info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+		addr = vm_unmapped_area(&info);
+	}
+
+	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
@@ -115,7 +162,18 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		info.flags = 0;
 		info.low_limit = TASK_UNMAPPED_BASE;
 		info.high_limit = TASK_SIZE;
+		if (pud_size_align)
+			info.align_mask = PAGE_MASK & (PUD_SIZE - 1);
 		addr = vm_unmapped_area(&info);
+
+		/*
+		 * If failed again with PUD_SIZE alignment, finally try with
+		 * huge page size alignment.
+		 */
+		if (addr & ~PAGE_MASK) {
+			info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+			addr = vm_unmapped_area(&info);
+		}
 	}
 
 	return addr;
-- 
2.4.3
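
As a sanity check of the align_mask arithmetic above: with
align_offset == 0, vm_unmapped_area() returns an address in which all
bits set in align_mask are clear, so PAGE_MASK & (PUD_SIZE - 1) forces
PUD_SIZE alignment while PAGE_MASK & ~huge_page_mask(h) forces huge
page alignment.  The standalone sketch below prints both masks; the
PAGE_SIZE, PUD_SIZE, and default huge page size values are assumptions
matching typical x86_64, not taken from the patch:

#include <stdio.h>

#define PAGE_SIZE	4096UL		/* assumed: x86_64 base page */
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PUD_SIZE	(1UL << 30)	/* assumed: 1GB */
#define HPAGE_SIZE	(1UL << 21)	/* assumed: 2MB default huge page */

int main(void)
{
	/* Mask used when PUD_SIZE alignment is attempted for PMD sharing. */
	unsigned long pud_align  = PAGE_MASK & (PUD_SIZE - 1);
	/* Fallback mask; ~huge_page_mask(h) == HPAGE_SIZE - 1. */
	unsigned long huge_align = PAGE_MASK & (HPAGE_SIZE - 1);

	printf("PUD align_mask:  0x%lx\n", pud_align);
	printf("huge align_mask: 0x%lx\n", huge_align);
	return 0;
}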
