Message-ID: <tip-7d5c038a03acfbedeb3ffef9da1814a47191f6f1@git.kernel.org>
Date: Tue, 14 Mar 2017 08:16:08 -0700
From: tip-bot for Dmitry Safonov <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: hpa@...or.com, mingo@...nel.org, kirill.shutemov@...ux.intel.com,
bp@...e.de, gorcunov@...nvz.org, luto@...nel.org,
dsafonov@...tuozzo.com, linux-kernel@...r.kernel.org,
tglx@...utronix.de, xiaolong.ye@...el.com
Subject: [tip:x86/mm] x86/hugetlb: Adjust to the new native/compat mmap
bases
Commit-ID: 7d5c038a03acfbedeb3ffef9da1814a47191f6f1
Gitweb: http://git.kernel.org/tip/7d5c038a03acfbedeb3ffef9da1814a47191f6f1
Author: Dmitry Safonov <dsafonov@...tuozzo.com>
AuthorDate: Tue, 14 Mar 2017 14:41:26 +0300
Committer: Thomas Gleixner <tglx@...utronix.de>
CommitDate: Tue, 14 Mar 2017 16:11:47 +0100
x86/hugetlb: Adjust to the new native/compat mmap bases

Commit 1b028f784e8c introduced separate mmap() bases for 32-bit and for
64-bit syscalls. The x86 mmap() code was modified to handle the separation,
but the patch series did not update the hugetlb code.

As a consequence, a 32-bit application mapping a file on hugetlbfs uses the
64-bit mmap base for address space allocation, which fails.

Adjust the hugetlb mapping code to use the proper bases depending on the
syscall invocation mode (64-bit or compat).

[ tglx: Massaged changelog ]

Fixes: 1b028f784e8c ("x86/mm: Introduce mmap_compat_base() for 32-bit mmap()")
Reported-by: kernel test robot <xiaolong.ye@...el.com>
Signed-off-by: Dmitry Safonov <dsafonov@...tuozzo.com>
Cc: 0x7f454c46@...il.com
Cc: linux-mm@...ck.org
Cc: Andy Lutomirski <luto@...nel.org>
Cc: Cyrill Gorcunov <gorcunov@...nvz.org>
Cc: Borislav Petkov <bp@...e.de>
Cc: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Link: http://lkml.kernel.org/r/20170314114126.9280-1-dsafonov@virtuozzo.com
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
 arch/x86/include/asm/elf.h   |  1 +
 arch/x86/kernel/sys_x86_64.c | 12 ------------
 arch/x86/mm/hugetlbpage.c    |  9 ++++++---
 arch/x86/mm/mmap.c           | 14 ++++++++++++++
 4 files changed, 21 insertions(+), 15 deletions(-)
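
For readers skimming the patch, the heart of the fix is the get_mmap_base()
helper, which moves from arch/x86/kernel/sys_x86_64.c into arch/x86/mm/mmap.c
so that both the regular mmap() path and the hugetlb path share a single
place that honours the compat bases. A copy of that helper, annotated with
extra comments for illustration only (the comments are not part of the
patch), reads roughly:

unsigned long get_mmap_base(int is_legacy)
{
	struct mm_struct *mm = current->mm;

#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
	/*
	 * A 32-bit (compat) syscall must allocate from the compat bases
	 * introduced by commit 1b028f784e8c, not from the 64-bit ones.
	 */
	if (in_compat_syscall()) {
		return is_legacy ? mm->mmap_compat_legacy_base
				 : mm->mmap_compat_base;
	}
#endif
	/* Native 64-bit syscall: legacy (bottom-up) or default (top-down) base. */
	return is_legacy ? mm->mmap_legacy_base : mm->mmap_base;
}

The hugetlb paths in the diff below then call this helper instead of reading
current->mm->mmap_legacy_base / mmap_base directly, and pick the matching
upper limit via tasksize_32bit()/tasksize_64bit().
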
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index ac5be5b..d4d3ed4 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -305,6 +305,7 @@ static inline int mmap_is_ia32(void)
extern unsigned long tasksize_32bit(void);
extern unsigned long tasksize_64bit(void);
+extern unsigned long get_mmap_base(int is_legacy);
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 63e89df..207b8f2 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -100,18 +100,6 @@ out:
return error;
}
-static unsigned long get_mmap_base(int is_legacy)
-{
- struct mm_struct *mm = current->mm;
-
-#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
- if (in_compat_syscall())
- return is_legacy ? mm->mmap_compat_legacy_base
- : mm->mmap_compat_base;
-#endif
- return is_legacy ? mm->mmap_legacy_base : mm->mmap_base;
-}
-
static void find_start_end(unsigned long flags, unsigned long *begin,
unsigned long *end)
{
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index c5066a2..a50f460 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -16,6 +16,8 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
+#include <asm/elf.h>
+#include <asm/compat.h>
#if 0 /* This is just for testing */
struct page *
@@ -82,8 +84,9 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
info.flags = 0;
info.length = len;
- info.low_limit = current->mm->mmap_legacy_base;
- info.high_limit = TASK_SIZE;
+ info.low_limit = get_mmap_base(1);
+ info.high_limit = in_compat_syscall() ?
+ tasksize_32bit() : tasksize_64bit();
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
return vm_unmapped_area(&info);
@@ -100,7 +103,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
- info.high_limit = current->mm->mmap_base;
+ info.high_limit = get_mmap_base(0);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
addr = vm_unmapped_area(&info);
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 529ab79..c3ea70f 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -31,6 +31,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <asm/elf.h>
+#include <asm/compat.h>
struct va_alignment __read_mostly va_align = {
.flags = -1,
@@ -153,6 +154,19 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
#endif
}
+unsigned long get_mmap_base(int is_legacy)
+{
+ struct mm_struct *mm = current->mm;
+
+#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
+ if (in_compat_syscall()) {
+ return is_legacy ? mm->mmap_compat_legacy_base
+ : mm->mmap_compat_base;
+ }
+#endif
+ return is_legacy ? mm->mmap_legacy_base : mm->mmap_base;
+}
+
const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_MPX)
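
As a usage note, the failure described in the changelog can be reproduced
from userspace with a sketch along the following lines: a 32-bit binary
(built with -m32) mapping a file on hugetlbfs. The mount point
/dev/hugepages, the file name and the 2 MiB mapping length are assumptions
for illustration; on an unpatched kernel the 32-bit mmap() searches from the
64-bit base and fails.

/* Illustration only; assumes hugetlbfs mounted at /dev/hugepages and
 * reserved huge pages. Build with: gcc -m32 -o huge-test huge-test.c
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#define LENGTH	(2UL * 1024 * 1024)	/* one 2 MiB huge page (assumed size) */

int main(void)
{
	int fd = open("/dev/hugepages/test", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Without the fix, this 32-bit mmap() used the 64-bit mmap base. */
	void *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("mapped at %p\n", addr);
	munmap(addr, LENGTH);
	close(fd);
	unlink("/dev/hugepages/test");
	return 0;
}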