Message-ID: <20180605061416.18690-4-liuwenliang@huawei.com>
Date: Tue, 5 Jun 2018 14:14:13 +0800
From: Abbott Liu <liuwenliang@...wei.com>
To: <aryabinin@...tuozzo.com>, <glider@...gle.com>,
<dvyukov@...gle.com>, <corbet@....net>, <linux@...linux.org.uk>,
<christoffer.dall@....com>, <marc.zyngier@....com>,
<arnd@...db.de>, <nicolas.pitre@...aro.org>,
<vladimir.murzin@....com>, <keescook@...omium.org>,
<jinb.park7@...il.com>, <alexandre.belloni@...tlin.com>,
<ard.biesheuvel@...aro.org>, <daniel.lezcano@...aro.org>,
<pombredanne@...b.com>, <kstewart@...uxfoundation.org>,
<liuwenliang@...wei.com>, <rob@...dley.net>,
<gregkh@...uxfoundation.org>, <f.fainelli@...il.com>,
<mawilcox@...rosoft.com>, <akpm@...ux-foundation.org>,
<mark.rutland@....com>, <catalin.marinas@....com>,
<yamada.masahiro@...ionext.com>, <alexander.levin@...izon.com>,
<tglx@...utronix.de>, <thgarnie@...gle.com>, <dhowells@...hat.com>,
<geert@...ux-m68k.org>, <andre.przywara@....com>,
<julien.thierry@....com>, <drjones@...hat.com>,
<philip@....systems>, <opendmb@...il.com>, <mhocko@...e.com>,
<kirill.shutemov@...ux.intel.com>, <kasan-dev@...glegroups.com>,
<linux-doc@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>,
<kvmarm@...ts.cs.columbia.edu>
CC: <a.ryabinin@...sung.com>, <ryabinin.a.a@...il.com>
Subject: [PATCH v5 3/6] Replace memory functions for KASan
From: Andrey Ryabinin <aryabinin@...tuozzo.com>
Functions like memset()/memmove()/memcpy() do a lot of memory accesses.
If a bad pointer is passed to one of these functions, it is important
to catch this. Compiler instrumentation cannot do this, since these
functions are written in assembly.

KASan replaces these memory functions with manually instrumented variants.
The original functions are declared as weak symbols, so that the strong
definitions in mm/kasan/kasan.c can replace them. The original functions
also have aliases with a '__' prefix in their names, so the
non-instrumented variants can still be called when needed.
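
As a rough illustration of that interposition, here is a simplified C
sketch of the idea (not the actual mm/kasan/kasan.c code; check_region()
is a made-up stand-in for KASan's shadow-memory check):

  #include <stddef.h>

  /* Hypothetical stand-in for KASan's shadow-memory check. */
  static void check_region(const void *p, size_t n, int write)
  {
          (void)p; (void)n; (void)write;  /* real KASan walks shadow bytes here */
  }

  /* Raw copy; in the kernel this is the assembly in arch/arm/lib/memcpy.S,
   * also reachable through the strong "__memcpy" alias. */
  void *__memcpy(void *dest, const void *src, size_t n)
  {
          char *d = dest;
          const char *s = src;

          while (n--)
                  *d++ = *s++;
          return dest;
  }

  /* A strong, instrumented definition (as mm/kasan/kasan.c provides one)
   * overrides the weak "memcpy" symbol, so ordinary callers get the
   * checked path before the real copy runs. */
  void *memcpy(void *dest, const void *src, size_t n)
  {
          check_region(src, n, 0);        /* check the read side */
          check_region(dest, n, 1);       /* check the write side */
          return __memcpy(dest, src, n);
  }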
We must use __memcpy()/__memset() instead of memcpy()/memset() when we
copy .data to RAM and when we clear .bss, because kasan_early_init()
cannot be called before .data and .bss have been initialized, so the
instrumented variants are not safe to use that early.
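
To make the ordering concrete, here is a hedged C sketch of the early
boot sequence (the real code is the assembly in head-common.S changed
below; all symbol names other than kasan_early_init(), __memcpy() and
__memset() are illustrative only):

  #include <stddef.h>

  /* Illustrative stand-ins for the linker/boot symbols involved. */
  extern char __data_rom[], __data_ram[], __bss_begin[];
  extern size_t data_size, bss_size;
  void *__memcpy(void *dest, const void *src, size_t n);
  void *__memset(void *s, int c, size_t n);
  void *memset(void *s, int c, size_t n);
  void kasan_early_init(void);

  static void early_boot(void)
  {
          /* The shadow region is not set up yet, so the instrumented
           * memcpy()/memset() would fault while checking it; use the
           * raw '__' variants for the very first copies. */
          __memcpy(__data_ram, __data_rom, data_size);  /* copy .data to RAM */
          __memset(__bss_begin, 0, bss_size);           /* clear .bss */

          kasan_early_init();             /* shadow mapping becomes usable */

          /* From here on, plain memset()/memcpy() go through the
           * KASan-instrumented versions and get range-checked. */
          char buf[16];
          memset(buf, 0, sizeof(buf));
  }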
Reported-by: Russell King - ARM Linux <linux@...linux.org.uk>
Acked-by: Florian Fainelli <f.fainelli@...il.com>
Tested-by: Florian Fainelli <f.fainelli@...il.com>
Tested-by: Joel Stanley <joel@....id.au>
Tested-by: Abbott Liu <liuwenliang@...wei.com>
Signed-off-by: Abbott Liu <liuwenliang@...wei.com>
---
arch/arm/boot/compressed/decompress.c | 2 ++
arch/arm/boot/compressed/libfdt_env.h | 2 ++
arch/arm/include/asm/string.h | 17 +++++++++++++++++
arch/arm/kernel/head-common.S | 4 ++--
arch/arm/lib/memcpy.S | 3 +++
arch/arm/lib/memmove.S | 5 ++++-
arch/arm/lib/memset.S | 3 +++
7 files changed, 33 insertions(+), 3 deletions(-)
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
index c16c182..c10c35b 100644
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -46,8 +46,10 @@ extern int memcmp(const void *cs, const void *ct, size_t count);
#endif
#ifdef CONFIG_KERNEL_XZ
+#ifndef CONFIG_KASAN
#define memmove memmove
#define memcpy memcpy
+#endif
#include "../../../../lib/decompress_unxz.c"
#endif
diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h
index 0743781..736ed36 100644
--- a/arch/arm/boot/compressed/libfdt_env.h
+++ b/arch/arm/boot/compressed/libfdt_env.h
@@ -17,4 +17,6 @@ typedef __be64 fdt64_t;
#define fdt64_to_cpu(x) be64_to_cpu(x)
#define cpu_to_fdt64(x) cpu_to_be64(x)
+#undef memset
+
#endif
diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
index 111a1d8..1f9016b 100644
--- a/arch/arm/include/asm/string.h
+++ b/arch/arm/include/asm/string.h
@@ -15,15 +15,18 @@ extern char * strchr(const char * s, int c);
#define __HAVE_ARCH_MEMCPY
extern void * memcpy(void *, const void *, __kernel_size_t);
+extern void *__memcpy(void *dest, const void *src, __kernel_size_t n);
#define __HAVE_ARCH_MEMMOVE
extern void * memmove(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *dest, const void *src, __kernel_size_t n);
#define __HAVE_ARCH_MEMCHR
extern void * memchr(const void *, int, __kernel_size_t);
#define __HAVE_ARCH_MEMSET
extern void * memset(void *, int, __kernel_size_t);
+extern void *__memset(void *s, int c, __kernel_size_t n);
#define __HAVE_ARCH_MEMSET32
extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
@@ -39,4 +42,18 @@ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
return __memset64(p, v, n * 8, v >> 32);
}
+
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use non-instrumented versions of the mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+#endif
+
#endif
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 6e0375e..c79b829 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -99,7 +99,7 @@ __mmap_switched:
THUMB( ldmia r4!, {r0, r1, r2, r3} )
THUMB( mov sp, r3 )
sub r2, r2, r1
- bl memcpy @ copy .data to RAM
+ bl __memcpy @ copy .data to RAM
#endif
ARM( ldmia r4!, {r0, r1, sp} )
@@ -107,7 +107,7 @@ __mmap_switched:
THUMB( mov sp, r3 )
sub r2, r1, r0
mov r1, #0
- bl memset @ clear .bss
+ bl __memset @ clear .bss
ldmia r4, {r0, r1, r2, r3}
str r9, [r0] @ Save processor ID
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 64111bd..79a83f8 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -61,6 +61,8 @@
/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
+.weak memcpy
+ENTRY(__memcpy)
ENTRY(mmiocpy)
ENTRY(memcpy)
@@ -68,3 +70,4 @@ ENTRY(memcpy)
ENDPROC(memcpy)
ENDPROC(mmiocpy)
+ENDPROC(__memcpy)
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index 69a9d47..313db6c 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -27,12 +27,14 @@
* occurring in the opposite direction.
*/
+.weak memmove
+ENTRY(__memmove)
ENTRY(memmove)
UNWIND( .fnstart )
subs ip, r0, r1
cmphi r2, ip
- bls memcpy
+ bls __memcpy
stmfd sp!, {r0, r4, lr}
UNWIND( .fnend )
@@ -225,3 +227,4 @@ ENTRY(memmove)
18: backward_copy_shift push=24 pull=8
ENDPROC(memmove)
+ENDPROC(__memmove)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index ed6d35d..64aa06a 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -16,6 +16,8 @@
.text
.align 5
+.weak memset
+ENTRY(__memset)
ENTRY(mmioset)
ENTRY(memset)
UNWIND( .fnstart )
@@ -135,6 +137,7 @@ UNWIND( .fnstart )
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)
+ENDPROC(__memset)
ENTRY(__memset32)
UNWIND( .fnstart )
--
2.9.0