Message-ID: <20161230155634.8692-4-dsafonov@virtuozzo.com>
Date: Fri, 30 Dec 2016 18:56:33 +0300
From: Dmitry Safonov <dsafonov@...tuozzo.com>
To: <linux-kernel@...r.kernel.org>
CC: <0x7f454c46@...il.com>, Dmitry Safonov <dsafonov@...tuozzo.com>,
"Thomas Gleixner" <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>,
Andy Lutomirski <luto@...nel.org>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
<x86@...nel.org>
Subject: [RFC 3/4] x86/mm: define TASK_SIZE as current->mm->task_size

Keep the task's virtual address space size in the mm_struct field,
which has existed for a long time and is initialized in
setup_new_exec() depending on the new task's personality.
This way TASK_SIZE will always be the same as current->mm->task_size.

Previously, TASK_SIZE and current->mm->task_size could differ:
e.g., a 32-bit process can clear the ADDR_LIMIT_3GB personality
(with the personality() syscall), so TASK_SIZE becomes 4GB while
mm->task_size stays at 3GB.
As both TASK_SIZE and current->mm->task_size are used frequently
throughout the code, this difference creates subtle situations, for
example: one can mmap() addresses above 3GB, but they will be hidden
in /proc/pid/pagemap as it checks mm->task_size.
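
A hypothetical (untested) 32-bit reproducer of that situation could
look roughly like the sketch below, assuming the program was started
with ADDR_LIMIT_3GB set so that mm->task_size was initialized to 3GB:

	#include <stdio.h>
	#include <sys/personality.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* Clear ADDR_LIMIT_3GB: TASK_SIZE for this task is now ~4GB */
		personality(personality(0xffffffff) & ~ADDR_LIMIT_3GB);

		/* mmap() may now return an address above 3GB... */
		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		printf("mapped at %p\n", p);

		/* ...but /proc/self/pagemap still stops at mm->task_size
		 * (3GB), so such a mapping would not be visible there. */
		return 0;
	}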

I've moved the initialization of mm->task_size earlier in
setup_new_exec(), as arch_pick_mmap_layout() initializes
mmap_legacy_base with TASK_UNMAPPED_BASE, which depends on TASK_SIZE.

Signed-off-by: Dmitry Safonov <dsafonov@...tuozzo.com>
---
arch/x86/include/asm/processor.h | 17 +++++++++--------
arch/x86/um/asm/segment.h | 2 +-
arch/x86/xen/mmu.c | 4 ++--
fs/exec.c | 17 +++++++++++------
4 files changed, 23 insertions(+), 17 deletions(-)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index dbc7dec5fa84..47ebde614f06 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -768,10 +768,8 @@ static inline void spin_lock_prefetch(const void *x)
/*
* User space process size: 3GB (default).
*/
-#define TASK_SIZE PAGE_OFFSET
-#define TASK_SIZE_MAX TASK_SIZE
-#define STACK_TOP TASK_SIZE
-#define STACK_TOP_MAX STACK_TOP
+#define TASK_SIZE_MAX PAGE_OFFSET
+#define INIT_TASK_SIZE TASK_SIZE_MAX
#define INIT_THREAD { \
.sp0 = TOP_OF_INIT_STACK, \
@@ -817,12 +815,9 @@ static inline void spin_lock_prefetch(const void *x)
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
0xc0000000 : 0xFFFFe000)
-#define TASK_SIZE (current->personality & ADDR_LIMIT_32BIT ? \
+#define INIT_TASK_SIZE (current->personality & ADDR_LIMIT_32BIT ? \
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-#define STACK_TOP TASK_SIZE
-#define STACK_TOP_MAX TASK_SIZE_MAX
-
#define INIT_THREAD { \
.sp0 = TOP_OF_INIT_STACK, \
.addr_limit = KERNEL_DS, \
@@ -833,6 +828,12 @@ extern unsigned long KSTK_ESP(struct task_struct *task);
#endif /* CONFIG_X86_64 */
+#define TASK_SIZE \
+ ((current->mm) ? current->mm->task_size : TASK_SIZE_MAX)
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX TASK_SIZE_MAX
+
extern unsigned long thread_saved_pc(struct task_struct *tsk);
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
diff --git a/arch/x86/um/asm/segment.h b/arch/x86/um/asm/segment.h
index 41dd5e1f3cd7..3a9aa9f050df 100644
--- a/arch/x86/um/asm/segment.h
+++ b/arch/x86/um/asm/segment.h
@@ -13,6 +13,6 @@ typedef struct {
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define KERNEL_DS MAKE_MM_SEG(~0UL)
-#define USER_DS MAKE_MM_SEG(TASK_SIZE)
+#define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX)
#endif
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 7d5afdb417cc..264ca3b7be58 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -830,7 +830,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
/* Need to make sure unshared kernel PMD is pinnable */
- xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
+ xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE_MAX)]),
PT_PMD);
#endif
xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
@@ -949,7 +949,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
#ifdef CONFIG_X86_PAE
/* Need to make sure unshared kernel PMD is unpinned */
- xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
+ xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE_MAX)]),
PT_PMD);
#endif
diff --git a/fs/exec.c b/fs/exec.c
index e57946610733..826b73600fc2 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1303,8 +1303,19 @@ void would_dump(struct linux_binprm *bprm, struct file *file)
}
EXPORT_SYMBOL(would_dump);
+#ifndef INIT_TASK_SIZE
+#define INIT_TASK_SIZE TASK_SIZE
+#endif
+
void setup_new_exec(struct linux_binprm * bprm)
{
+
+ /* Set the new mm task size. We have to do that late because it may
+ * depend on TIF_32BIT which is only updated in flush_thread() on
+ * some architectures like powerpc
+ */
+ current->mm->task_size = INIT_TASK_SIZE;
+
arch_pick_mmap_layout(current->mm);
/* This is the point of no return */
@@ -1318,12 +1329,6 @@ void setup_new_exec(struct linux_binprm * bprm)
perf_event_exec();
__set_task_comm(current, kbasename(bprm->filename), true);
- /* Set the new mm task size. We have to do that late because it may
- * depend on TIF_32BIT which is only updated in flush_thread() on
- * some architectures like powerpc
- */
- current->mm->task_size = TASK_SIZE;
-
/* install the new credentials */
if (!uid_eq(bprm->cred->uid, current_euid()) ||
!gid_eq(bprm->cred->gid, current_egid())) {
--
2.11.0