Message-id: <1404905415-9046-9-git-send-email-a.ryabinin@samsung.com>
Date: Wed, 09 Jul 2014 15:30:02 +0400
From: Andrey Ryabinin <a.ryabinin@...sung.com>
To: linux-kernel@...r.kernel.org
Cc: Dmitry Vyukov <dvyukov@...gle.com>,
Konstantin Serebryany <kcc@...gle.com>,
Alexey Preobrazhensky <preobr@...gle.com>,
Andrey Konovalov <adech.fo@...il.com>,
Yuri Gribov <tetra2005@...il.com>,
Konstantin Khlebnikov <koct9i@...il.com>,
Sasha Levin <sasha.levin@...cle.com>,
Michal Marek <mmarek@...e.cz>,
Russell King <linux@....linux.org.uk>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-kbuild@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
x86@...nel.org, linux-mm@...ck.org,
Andrey Ryabinin <a.ryabinin@...sung.com>
Subject: [RFC/PATCH RESEND -next 08/21] mm: page_alloc: add kasan hooks on
 alloc and free paths
Add kernel address sanitizer hooks to mark allocated pages' addresses
as accessible in the corresponding shadow region, and to mark freed
pages as inaccessible.
Signed-off-by: Andrey Ryabinin <a.ryabinin@...sung.com>
---
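For context, the hooks below build on kasan's shadow-byte scheme: every
KASAN_SHADOW_SCALE_SIZE (8) bytes of memory map to one shadow byte, a
shadow value of 0 marks the range accessible, and a poison value such as
KASAN_FREE_PAGE marks it inaccessible. A minimal userspace sketch of
that idea follows; shadow_base, mem_base and the shadow_of() translation
are illustrative assumptions, not the kernel's actual shadow mapping:

/*
 * Illustrative sketch only: shadow_base, mem_base and shadow_of() are
 * assumptions for demonstration, not the kernel's real shadow layout.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define KASAN_SHADOW_SCALE_SHIFT	3	/* 8 bytes per shadow byte */
#define KASAN_FREE_PAGE			0xFF	/* page was freed */

static uint8_t *shadow_base;	/* hypothetical shadow region start */
static uintptr_t mem_base;	/* hypothetical start of tracked memory */

/* Translate a tracked address to its shadow byte. */
static uint8_t *shadow_of(const void *addr)
{
	return shadow_base +
		(((uintptr_t)addr - mem_base) >> KASAN_SHADOW_SCALE_SHIFT);
}

/* Mark [addr, addr + size) accessible: zero its shadow bytes. */
static void unpoison_shadow(const void *addr, size_t size)
{
	memset(shadow_of(addr), 0, size >> KASAN_SHADOW_SCALE_SHIFT);
}

/* Mark [addr, addr + size) inaccessible with a poison value. */
static void poison_shadow(const void *addr, size_t size, uint8_t value)
{
	memset(shadow_of(addr), value, size >> KASAN_SHADOW_SCALE_SHIFT);
}

kasan_alloc_pages() and kasan_free_pages() in the patch apply exactly
this unpoison/poison pairing over PAGE_SIZE << order bytes.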
 include/linux/kasan.h |  6 ++++++
 mm/Makefile           |  2 ++
 mm/kasan/kasan.c      | 18 ++++++++++++++++++
 mm/kasan/kasan.h      |  1 +
 mm/kasan/report.c     |  7 +++++++
 mm/page_alloc.c       |  4 ++++
 6 files changed, 38 insertions(+)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 7efc3eb..4adc0a1 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -17,6 +17,9 @@ void kasan_disable_local(void);
 void kasan_alloc_shadow(void);
 void kasan_init_shadow(void);
 
+void kasan_alloc_pages(struct page *page, unsigned int order);
+void kasan_free_pages(struct page *page, unsigned int order);
+
 #else /* CONFIG_KASAN */
 
 static inline void unpoison_shadow(const void *address, size_t size) {}
@@ -28,6 +31,9 @@ static inline void kasan_disable_local(void) {}
 static inline void kasan_init_shadow(void) {}
 static inline void kasan_alloc_shadow(void) {}
 
+static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
+static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+
 #endif /* CONFIG_KASAN */
 
 #endif /* LINUX_KASAN_H */
diff --git a/mm/Makefile b/mm/Makefile
index dbe9a22..6a9c3f8 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -2,6 +2,8 @@
 # Makefile for the linux memory manager.
 #
 
+KASAN_SANITIZE_page_alloc.o := n
+
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= gup.o highmem.o madvise.o memory.o mincore.o \
 			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index e2cd345..109478e 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -177,6 +177,24 @@ void __init kasan_init_shadow(void)
 	}
 }
 
+void kasan_alloc_pages(struct page *page, unsigned int order)
+{
+	if (unlikely(!kasan_initialized))
+		return;
+
+	if (likely(page && !PageHighMem(page)))
+		unpoison_shadow(page_address(page), PAGE_SIZE << order);
+}
+
+void kasan_free_pages(struct page *page, unsigned int order)
+{
+	if (unlikely(!kasan_initialized))
+		return;
+
+	if (likely(!PageHighMem(page)))
+		poison_shadow(page_address(page), PAGE_SIZE << order, KASAN_FREE_PAGE);
+}
+
 void *kasan_memcpy(void *dst, const void *src, size_t len)
 {
 	if (unlikely(len == 0))
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 711ae4f..be9597e 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -5,6 +5,7 @@
 #define KASAN_SHADOW_SCALE_SIZE	(1UL << KASAN_SHADOW_SCALE_SHIFT)
 #define KASAN_SHADOW_MASK	(KASAN_SHADOW_SCALE_SIZE - 1)
 
+#define KASAN_FREE_PAGE		0xFF  /* page was freed */
 #define KASAN_SHADOW_GAP	0xF9  /* address belongs to shadow memory */
 
 struct access_info {
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 2430e05..6ef9e57 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -46,6 +46,9 @@ static void print_error_description(struct access_info *info)
 	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
 		bug_type = "buffer overflow";
 		break;
+	case KASAN_FREE_PAGE:
+		bug_type = "use after free";
+		break;
 	case KASAN_SHADOW_GAP:
 		bug_type = "wild memory access";
 		break;
@@ -67,6 +70,10 @@ static void print_address_description(struct access_info *info)
 	page = virt_to_page(info->access_addr);
 
 	switch (shadow_val) {
+	case KASAN_FREE_PAGE:
+		dump_page(page, "kasan error");
+		dump_stack();
+		break;
 	case KASAN_SHADOW_GAP:
 		pr_err("No metainfo is available for this access.\n");
 		dump_stack();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8c9eeec..67833d1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
 #include <linux/page-debug-flags.h>
 #include <linux/hugetlb.h>
 #include <linux/sched/rt.h>
+#include <linux/kasan.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -747,6 +748,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 
 	trace_mm_page_free(page, order);
 	kmemcheck_free_shadow(page, order);
+	kasan_free_pages(page, order);
 
 	if (PageAnon(page))
 		page->mapping = NULL;
@@ -2807,6 +2809,7 @@ out:
 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
 		goto retry_cpuset;
 
+	kasan_alloc_pages(page, order);
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -6415,6 +6418,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	if (end != outer_end)
 		free_contig_range(end, outer_end - end);
 
+	kasan_alloc_pages(pfn_to_page(start), end - start);
 done:
 	undo_isolate_page_range(pfn_max_align_down(start),
 				pfn_max_align_up(end), migratetype);
--
1.8.5.5
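
To illustrate the bug class these hooks catch (a hypothetical snippet,
not part of the patch): once free_pages() has run, the page's shadow
bytes hold KASAN_FREE_PAGE, so any later access through a stale pointer
is flagged, and print_error_description() above classifies it as
"use after free".

#include <linux/gfp.h>

/* Hypothetical example only: the use-after-free these hooks detect. */
static void kasan_uaf_example(void)
{
	unsigned long addr = __get_free_pages(GFP_KERNEL, 0);
	char *p = (char *)addr;

	free_pages(addr, 0);	/* kasan_free_pages() poisons the shadow */
	p[0] = 1;		/* shadow byte is KASAN_FREE_PAGE ->
				 * reported as "use after free" */
}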