Message-Id: <1462838713-31343-10-git-send-email-kamal@canonical.com>
Date:	Mon,  9 May 2016 17:04:28 -0700
From:	Kamal Mostafa <kamal@...onical.com>
To:	linux-kernel@...r.kernel.org, stable@...r.kernel.org,
	kernel-team@...ts.ubuntu.com
Cc:	Martin Schwidefsky <schwidefsky@...ibm.com>,
	Kamal Mostafa <kamal@...onical.com>
Subject: [PATCH 3.19.y-ckt 09/54] s390: avoid z13 cache aliasing

3.19.8-ckt21 -stable review patch.  If anyone has any objections, please let me know.

---8<------------------------------------------------------------

From: Martin Schwidefsky <schwidefsky@...ibm.com>

commit 1f6b83e5e4d3aed46eac1d219322fba9c7341cd8 upstream.

Avoid cache aliasing on z13 by aligning shared objects to multiples
of 512K. The virtual addresses of a page from a shared file need to
have identical bits in the range 2^12 to 2^18.
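
Concretely, with 4K pages 512K = 2^19, so the condition covers the
seven page-index bits 2^12 through 2^18, which is where the 0x7f
page mask used below comes from. A minimal sketch of the check
(ALIAS_BITS and may_alias are illustrative names, not kernel code):

	/* Two virtual mappings of the same file page are cache
	 * synonyms on z13 if they differ somewhere in bits 12..18.
	 * With PAGE_SHIFT == 12: 0x7f << PAGE_SHIFT == 0x7f000. */
	#define ALIAS_BITS	0x7f000UL

	static int may_alias(unsigned long va1, unsigned long va2)
	{
		return ((va1 ^ va2) & ALIAS_BITS) != 0;
	}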

Signed-off-by: Martin Schwidefsky <schwidefsky@...ibm.com>
Signed-off-by: Kamal Mostafa <kamal@...onical.com>
---
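[ Note: the effect is observable from user space on a 64-bit task:
  two MAP_SHARED mappings of the same file offset should agree in
  bits 2^12..2^18 once this patch is applied. A minimal sketch of
  such a check (not part of the patch; the file path is arbitrary):

/* check_aliasing.c -- illustration only; assumes a 64-bit s390
 * task on a kernel with this patch applied. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);
	if (fd < 0)
		return 1;
	/* Two shared mappings of the same file offset... */
	void *a = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	void *b = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (a == MAP_FAILED || b == MAP_FAILED)
		return 1;
	/* ...must agree in the page-index bits 12..18. */
	unsigned long diff = ((uintptr_t)a ^ (uintptr_t)b) & 0x7f000UL;
	printf("a=%p b=%p differing alias bits: %#lx\n", a, b, diff);
	return diff != 0;
}
]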
 arch/s390/include/asm/elf.h     |   8 ++-
 arch/s390/include/asm/pgtable.h |   4 ++
 arch/s390/kernel/process.c      |  10 ---
 arch/s390/mm/init.c             |   9 ++-
 arch/s390/mm/mmap.c             | 142 +++++++++++++++++++++++++++++++++++++++-
 5 files changed, 155 insertions(+), 18 deletions(-)

diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index f6e43d3..c9df40b 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -163,8 +163,8 @@ extern unsigned int vdso_enabled;
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk.  */
 
-extern unsigned long randomize_et_dyn(unsigned long base);
-#define ELF_ET_DYN_BASE		(randomize_et_dyn(STACK_TOP / 3 * 2))
+extern unsigned long randomize_et_dyn(void);
+#define ELF_ET_DYN_BASE		randomize_et_dyn()
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
@@ -209,7 +209,9 @@ do {								\
 } while (0)
 #endif /* CONFIG_COMPAT */
 
-#define STACK_RND_MASK	0x7ffUL
+extern unsigned long mmap_rnd_mask;
+
+#define STACK_RND_MASK	(mmap_rnd_mask)
 
 #define ARCH_DLINFO							    \
 do {									    \
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 3883ee2..e08fbcd 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1779,6 +1779,10 @@ extern int s390_enable_sie(void);
 extern int s390_enable_skey(void);
 extern void s390_reset_cmma(struct mm_struct *mm);
 
+/* s390 has a private copy of get unmapped area to deal with cache synonyms */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
 /*
  * No page table caches to initialise
  */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 0f8e8f5..9be2434 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -243,13 +243,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 	ret = PAGE_ALIGN(mm->brk + brk_rnd());
 	return (ret > mm->brk) ? ret : mm->brk;
 }
-
-unsigned long randomize_et_dyn(unsigned long base)
-{
-	unsigned long ret;
-
-	if (!(current->flags & PF_RANDOMIZE))
-		return base;
-	ret = PAGE_ALIGN(base + brk_rnd());
-	return (ret > base) ? ret : base;
-}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c7235e0..d35b151 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -71,13 +71,16 @@ static void __init setup_zero_pages(void)
 		break;
 	case 0x2827:	/* zEC12 */
 	case 0x2828:	/* zEC12 */
-	default:
 		order = 5;
 		break;
+	case 0x2964:	/* z13 */
+	default:
+		order = 7;
+		break;
 	}
 	/* Limit number of empty zero pages for small memory sizes */
-	if (order > 2 && totalram_pages <= 16384)
-		order = 2;
+	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
+		order--;
 
 	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!empty_zero_page)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 9b436c2..d008f638 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -28,8 +28,12 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/compat.h>
+#include <linux/security.h>
 #include <asm/pgalloc.h>
 
+unsigned long mmap_rnd_mask;
+unsigned long mmap_align_mask;
+
 static unsigned long stack_maxrandom_size(void)
 {
 	if (!(current->flags & PF_RANDOMIZE))
@@ -60,8 +64,10 @@ static unsigned long mmap_rnd(void)
 {
 	if (!(current->flags & PF_RANDOMIZE))
 		return 0;
-	/* 8MB randomization for mmap_base */
-	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+	if (is_32bit_task())
+		return (get_random_int() & 0x7ff) << PAGE_SHIFT;
+	else
+		return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
 }
 
 static unsigned long mmap_base_legacy(void)
@@ -81,6 +87,106 @@ static inline unsigned long mmap_base(void)
 	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
 }
 
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	struct vm_unmapped_area_info info;
+	int do_color_align;
+
+	if (len > TASK_SIZE - mmap_min_addr)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = !is_32bit_task();
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
+}
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			  const unsigned long len, const unsigned long pgoff,
+			  const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
+	int do_color_align;
+
+	/* requested length too big for entire address space */
+	if (len > TASK_SIZE - mmap_min_addr)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+	/* requesting a specific address */
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+				(!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = !is_32bit_task();
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
+
+	return addr;
+}
+
+unsigned long randomize_et_dyn(void)
+{
+	unsigned long base;
+
+	base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
+	return base + mmap_rnd();
+}
+
 #ifndef CONFIG_64BIT
 
 /*
@@ -177,4 +283,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	}
 }
 
+static int __init setup_mmap_rnd(void)
+{
+	struct cpuid cpu_id;
+
+	get_cpu_id(&cpu_id);
+	switch (cpu_id.machine) {
+	case 0x9672:
+	case 0x2064:
+	case 0x2066:
+	case 0x2084:
+	case 0x2086:
+	case 0x2094:
+	case 0x2096:
+	case 0x2097:
+	case 0x2098:
+	case 0x2817:
+	case 0x2818:
+	case 0x2827:
+	case 0x2828:
+		mmap_rnd_mask = 0x7ffUL;
+		mmap_align_mask = 0UL;
+		break;
+	case 0x2964:	/* z13 */
+	default:
+		mmap_rnd_mask = 0x3ff80UL;
+		mmap_align_mask = 0x7fUL;
+		break;
+	}
+	return 0;
+}
+early_initcall(setup_mmap_rnd);
+
 #endif
-- 
2.7.4
