Message-Id: <20111103215421.AF532122186@elm.corp.google.com>
Date:	Thu,  3 Nov 2011 14:54:21 -0700 (PDT)
From:	kenchen@...gle.com (Ken Chen)
To:	akpm@...ux-foundation.org, linux-kernel@...r.kernel.org,
	mel@....ul.ie
Subject: [patch] hugetlb: add hugepage reservation upon mremap expansion.

hugetlb: add hugepage reservation upon mremap expansion

hugetlb mappings have the semantics that huge pages are reserved up front, at
mmap time.  We need to extend the same reservation scheme to the mremap
expansion case.

Signed-off-by: Ken Chen <kenchen@...gle.com>

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index c36d851..5d22933 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -28,6 +28,8 @@ int hugetlb_mempolicy_sysctl_handler(
 
 int move_hugetlb_page_tables(struct vm_area_struct *vma, unsigned long old_addr,
 			     unsigned long new_addr, unsigned long len);
+int hugetlb_expand_resv(struct vm_area_struct *vma, unsigned long old_len,
+			 unsigned long new_len);
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
 			struct page **, struct vm_area_struct **,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6f5b56f..1f8e333 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2250,6 +2250,34 @@ int move_hugetlb_page_tables(
 	return len + old_addr - old_end;
 }
 
+int hugetlb_expand_resv(struct vm_area_struct *vma, unsigned long old_len,
+			 unsigned long new_len)
+{
+	struct hstate *h = hstate_vma(vma);
+	int ret = 0;
+
+	if (old_len >= new_len)
+		goto out;
+
+	if (vma->vm_flags & VM_MAYSHARE) {
+		struct inode *inode = vma->vm_file->f_mapping->host;
+		unsigned long from, to, vm_flags;
+
+		from = (vma->vm_pgoff >> huge_page_order(h)) +
+			(old_len >> huge_page_shift(h));
+		to   = (vma->vm_pgoff >> huge_page_order(h)) +
+			(new_len >> huge_page_shift(h));
+		vm_flags = vma->vm_flags;
+
+		ret = hugetlb_reserve_pages(inode, from, to, vma, vm_flags);
+	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+		long expand = (new_len - old_len) >> huge_page_shift(h);
+		ret = hugetlb_acct_memory(h, expand);
+	}
+out:
+	return ret;
+}
+
 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end, struct page *ref_page)
 {
diff --git a/mm/mremap.c b/mm/mremap.c
index 9f6c903..010f93a 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -410,7 +410,7 @@ unsigned long do_mremap(unsigned long addr,
 	unsigned long flags, unsigned long new_addr)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma = NULL;
 	unsigned long ret = -EINVAL;
 	unsigned long charged = 0;
 
@@ -525,6 +525,15 @@ unsigned long do_mremap(unsigned long addr,
 out:
 	if (ret & ~PAGE_MASK)
 		vm_unacct_memory(charged);
+	else if (vma && is_vm_hugetlb_page(vma)) {
+		unsigned long ret2;
+
+		ret2 = hugetlb_expand_resv(vma, old_len, new_len);
+		if (ret2) {
+			ret = ret2;
+			vm_unacct_memory(charged);
+		}
+	}
 	return ret;
 }
 