Date:   Fri, 13 Sep 2019 05:31:10 +0800
From:   Wei Yang <richardw.yang@...ux.intel.com>
To:     viro@...iv.linux.org.uk
Cc:     linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
        Wei Yang <richardw.yang@...ux.intel.com>
Subject: [PATCH 3/3] fs/userfaultfd.c: wrap checking huge page alignment into a helper

There are three places that check whether an address is aligned to the
huge page size.

Introduce a helper function to wrap this check.
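
For illustration only (not part of the patch): the test the helper wraps
is the usual power-of-two mask check, where an address is huge-page
aligned exactly when the low bits selected by (hpagesize - 1) are all
zero. Below is a minimal user-space C sketch of the same idiom; it
assumes a hypothetical fixed 2 MiB huge page size instead of the
per-VMA size that the kernel obtains via vma_kernel_pagesize().

#include <stdbool.h>
#include <stdio.h>

/* Illustrative assumption: a fixed 2 MiB "huge page" size. The kernel
 * helper instead asks for the size of each VMA's pages. */
#define HPAGE_SIZE (2UL * 1024 * 1024)

/* Same idiom as the patch's helper: for a power-of-two size, addr is
 * aligned iff the bits below the size boundary are all zero. */
static bool addr_hpage_aligned(unsigned long addr, unsigned long hpagesize)
{
	return (addr & (hpagesize - 1)) == 0;
}

int main(void)
{
	printf("%d\n", addr_hpage_aligned(0x200000UL, HPAGE_SIZE)); /* 1: on a 2 MiB boundary */
	printf("%d\n", addr_hpage_aligned(0x201000UL, HPAGE_SIZE)); /* 0: 4 KiB past the boundary */
	return 0;
}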

Signed-off-by: Wei Yang <richardw.yang@...ux.intel.com>
---
 fs/userfaultfd.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 70c0e0ef01d7..d8665ffdd576 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1296,6 +1296,16 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma)
 		vma_is_shmem(vma);
 }
 
+static inline bool addr_huge_page_aligned(unsigned long addr,
+					  struct vm_area_struct *vma)
+{
+	unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
+
+	if (addr & (vma_hpagesize - 1))
+		return false;
+	return true;
+}
+
 static int userfaultfd_register(struct userfaultfd_ctx *ctx,
 				unsigned long arg)
 {
@@ -1363,12 +1373,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
 	 * If the first vma contains huge pages, make sure start address
 	 * is aligned to huge page size.
 	 */
-	if (is_vm_hugetlb_page(vma)) {
-		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
-
-		if (start & (vma_hpagesize - 1))
-			goto out_unlock;
-	}
+	if (is_vm_hugetlb_page(vma) && !addr_huge_page_aligned(start, vma))
+		goto out_unlock;
 
 	/*
 	 * Search for not compatible vmas.
@@ -1403,11 +1409,9 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
 		 * check alignment.
 		 */
 		if (end <= cur->vm_end && is_vm_hugetlb_page(cur)) {
-			unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
-
 			ret = -EINVAL;
 
-			if (end & (vma_hpagesize - 1))
+			if (!addr_huge_page_aligned(end, cur))
 				goto out_unlock;
 		}
 
@@ -1551,12 +1555,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
 	 * If the first vma contains huge pages, make sure start address
 	 * is aligned to huge page size.
 	 */
-	if (is_vm_hugetlb_page(vma)) {
-		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
-
-		if (start & (vma_hpagesize - 1))
-			goto out_unlock;
-	}
+	if (is_vm_hugetlb_page(vma) && !addr_huge_page_aligned(start, vma))
+		goto out_unlock;
 
 	/*
 	 * Search for not compatible vmas.
-- 
2.17.1
