Date:   Tue, 16 May 2017 10:16:40 +0900
From:   js1304@...il.com
To:     Andrew Morton <akpm@...ux-foundation.org>
Cc:     Andrey Ryabinin <aryabinin@...tuozzo.com>,
        Alexander Potapenko <glider@...gle.com>,
        Dmitry Vyukov <dvyukov@...gle.com>, kasan-dev@...glegroups.com,
        linux-mm@...ck.org, linux-kernel@...r.kernel.org,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>,
        "H . Peter Anvin" <hpa@...or.com>, kernel-team@....com,
        Joonsoo Kim <iamjoonsoo.kim@....com>
Subject: [PATCH v1 02/11] mm/kasan: don't fetch the next shadow value speculatively

From: Joonsoo Kim <iamjoonsoo.kim@....com>

Fetching the next shadow value speculatively has pros and cons.
If both shadow bytes are zero, we can exit the check with a single
branch. However, the wide fetch can be an unaligned access, and if
the next shadow value isn't zero, we need to do an additional check.
The next shadow value can be non-zero for various reasons even when
the access itself is valid.
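
For reference, a simplified sketch of the shadow mapping (mirroring
kasan_mem_to_shadow(); mem_to_shadow_sketch is a made-up name here,
and the generic KASAN layout with KASAN_SHADOW_SCALE_SHIFT == 3,
i.e. one shadow byte per 8 bytes of memory, is assumed):

	static inline void *mem_to_shadow_sketch(unsigned long addr)
	{
		/* one shadow byte covers an 8-byte granule */
		return (void *)(addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}

A u16 load at this shadow address reads the shadow byte for addr's
granule and also the one for the following 8-byte granule. That next
byte may be non-zero, e.g. when it covers a redzone of a neighbouring
object, and the 2-byte load itself is unaligned whenever the shadow
address is odd.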

Moreover, a following patch will introduce on-demand shadow memory
allocation/mapping, and this speculative fetch would cause more
stale TLB cases.

So, I think the side effects outweigh the benefit.
This patch removes the speculative fetch.
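
The alignment tests below use KASAN_SHADOW_MASK, which equals
KASAN_SHADOW_SCALE_SIZE - 1 (7 for the usual 8-byte granule). A
worked example for the 2-byte case:

	/*
	 * addr == 0x1006: (addr + 1) & 7 == 7 (non-zero), so addr
	 * and addr + 1 lie in the same granule [0x1000, 0x1008)
	 * and a single shadow byte covers the whole access.
	 *
	 * addr == 0x1007: (addr + 1) & 7 == 0, so the access
	 * crosses into [0x1008, 0x1010) and the last byte must be
	 * checked separately.
	 */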

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@....com>
---
 mm/kasan/kasan.c | 104 +++++++++++++++++++++++--------------------------------
 1 file changed, 44 insertions(+), 60 deletions(-)

diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 85ee45b0..97d3560 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -136,90 +136,74 @@ static __always_inline bool memory_is_poisoned_1(unsigned long addr)
 
 static __always_inline bool memory_is_poisoned_2(unsigned long addr)
 {
-	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		if (memory_is_poisoned_1(addr + 1))
-			return true;
-
-		/*
-		 * If single shadow byte covers 2-byte access, we don't
-		 * need to do anything more. Otherwise, test the first
-		 * shadow byte.
-		 */
-		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
-			return false;
+	if (unlikely(memory_is_poisoned_1(addr)))
+		return true;
 
-		return unlikely(*(u8 *)shadow_addr);
-	}
+	/*
+	 * If single shadow byte covers 2-byte access, we don't
+	 * need to do anything more. Otherwise, check the last
+	 * byte of the access as well.
+	 */
+	if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
+		return false;
 
-	return false;
+	return memory_is_poisoned_1(addr + 1);
 }
 
 static __always_inline bool memory_is_poisoned_4(unsigned long addr)
 {
-	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		if (memory_is_poisoned_1(addr + 3))
-			return true;
-
-		/*
-		 * If single shadow byte covers 4-byte access, we don't
-		 * need to do anything more. Otherwise, test the first
-		 * shadow byte.
-		 */
-		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
-			return false;
+	if (unlikely(memory_is_poisoned_1(addr + 3)))
+		return true;
 
-		return unlikely(*(u8 *)shadow_addr);
-	}
+	/*
+	 * If single shadow byte covers 4-byte access, we don't
+	 * need to do anything more. Otherwise, test the first
+	 * shadow byte.
+	 */
+	if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
+		return false;
 
-	return false;
+	return memory_is_poisoned_1(addr);
 }
 
 static __always_inline bool memory_is_poisoned_8(unsigned long addr)
 {
-	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
+	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);
 
-	if (unlikely(*shadow_addr)) {
-		if (memory_is_poisoned_1(addr + 7))
-			return true;
+	if (unlikely(*shadow_addr))
+		return true;
 
-		/*
-		 * If single shadow byte covers 8-byte access, we don't
-		 * need to do anything more. Otherwise, test the first
-		 * shadow byte.
-		 */
-		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
-			return false;
+	/*
+	 * If single shadow byte covers 8-byte access, we don't
+	 * need to do anything more. Otherwise, check the last
+	 * byte of the access as well.
+	 */
+	if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+		return false;
 
-		return unlikely(*(u8 *)shadow_addr);
-	}
+	if (unlikely(memory_is_poisoned_1(addr + 7)))
+		return true;
 
 	return false;
 }
 
 static __always_inline bool memory_is_poisoned_16(unsigned long addr)
 {
-	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		u16 shadow_first_bytes = *(u16 *)shadow_addr;
+	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
 
-		if (unlikely(shadow_first_bytes))
-			return true;
+	if (unlikely(*shadow_addr))
+		return true;
 
-		/*
-		 * If two shadow bytes covers 16-byte access, we don't
-		 * need to do anything more. Otherwise, test the last
-		 * shadow byte.
-		 */
-		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
-			return false;
+	/*
+	 * If two shadow bytes cover 16-byte access, we don't
+	 * need to do anything more. Otherwise, test the last
+	 * shadow byte.
+	 */
+	if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+		return false;
 
-		return memory_is_poisoned_1(addr + 15);
-	}
+	if (unlikely(memory_is_poisoned_1(addr + 15)))
+		return true;
 
 	return false;
 }
-- 
2.7.4
