Date:   Wed, 27 Dec 2017 13:44:34 +0100
From:   Dmitry Vyukov <dvyukov@...gle.com>
To:     akpm@...ux-foundation.org, aryabinin@...tuozzo.com
Cc:     linux-mm@...ck.org, linux-kernel@...r.kernel.org,
        kasan-dev@...glegroups.com, Dmitry Vyukov <dvyukov@...gle.com>
Subject: [PATCH 3/5] kasan: detect invalid frees for large mempool objects

Detect frees of pointers into the middle of mempool objects.

I tried to write a one-off test for this, but it turned out to be very
tricky, so I reverted it. First, mempool does not call
kasan_poison_kfree() unless the allocation function fails; I plugged in
a stub allocation function that fails on the second and subsequent
allocations. But then mempool stopped calling kasan_poison_kfree()
altogether, because it does so only when the allocation function is
mempool_alloc_slab() or mempool_kmalloc(). We could teach mempool about
such a special failing test allocation function, but it also can't
coexist with the KASAN tests, because those are built as a module.
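
For illustration only (not part of this patch), here is a rough sketch
of the kind of misuse the new check is aimed at. The function name, the
pool parameters and the offset are made up, and the report only fires
once the bad pointer is actually returned to the pool, which is exactly
what made the test above so tricky:

  /*
   * Illustrative sketch, not part of this patch: a mempool backed by
   * large kmalloc objects, where a pointer into the middle of an
   * element reaches mempool_free().  Once such an element is put back
   * into the pool, kasan_poison_kfree() now reports an invalid free
   * instead of poisoning the wrong range.
   */
  #include <linux/mempool.h>
  #include <linux/slab.h>

  static void example_invalid_mempool_free(void)
  {
  	mempool_t *pool;
  	char *p;

  	/*
  	 * With SLUB, 4-page elements bypass the kmalloc caches and come
  	 * straight from the page allocator (non-slab compound pages).
  	 */
  	pool = mempool_create_kmalloc_pool(4, 4 * PAGE_SIZE);
  	if (!pool)
  		return;

  	p = mempool_alloc(pool, GFP_KERNEL);
  	if (p)
  		/* BUG: offset pointer; caught by the ptr != page_address() check. */
  		mempool_free(p + 8, pool);

  	mempool_destroy(pool);
  }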

Signed-off-by: Dmitry Vyukov <dvyukov@...gle.com>
Cc: linux-mm@...ck.org
Cc: linux-kernel@...r.kernel.org
Cc: kasan-dev@...glegroups.com
---
 include/linux/kasan.h |  4 ++--
 mm/kasan/kasan.c      | 11 ++++++++---
 mm/mempool.c          |  6 +++---
 3 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index f0d13c30acc6..fc45f8952d1e 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -57,7 +57,7 @@ void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
 
 void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
 void kasan_kfree_large(void *ptr, unsigned long ip);
-void kasan_poison_kfree(void *ptr);
+void kasan_poison_kfree(void *ptr, unsigned long ip);
 void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
 		  gfp_t flags);
 void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
@@ -109,7 +109,7 @@ static inline void kasan_init_slab_obj(struct kmem_cache *cache,
 
 static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
 static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
-static inline void kasan_poison_kfree(void *ptr) {}
+static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
 				size_t size, gfp_t flags) {}
 static inline void kasan_krealloc(const void *object, size_t new_size,
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 32f555ded938..77c103748728 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -588,17 +588,22 @@ void kasan_krealloc(const void *object, size_t size, gfp_t flags)
 		kasan_kmalloc(page->slab_cache, object, size, flags);
 }
 
-void kasan_poison_kfree(void *ptr)
+void kasan_poison_kfree(void *ptr, unsigned long ip)
 {
 	struct page *page;
 
 	page = virt_to_head_page(ptr);
 
-	if (unlikely(!PageSlab(page)))
+	if (unlikely(!PageSlab(page))) {
+		if (ptr != page_address(page)) {
+			kasan_report_invalid_free(ptr, ip);
+			return;
+		}
 		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
 				KASAN_FREE_PAGE);
-	else
+	} else {
 		kasan_poison_slab_free(page->slab_cache, ptr);
+	}
 }
 
 void kasan_kfree_large(void *ptr, unsigned long ip)
diff --git a/mm/mempool.c b/mm/mempool.c
index 7d8c5a0010a2..5c9dce34719b 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -103,10 +103,10 @@ static inline void poison_element(mempool_t *pool, void *element)
 }
 #endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
 
-static void kasan_poison_element(mempool_t *pool, void *element)
+static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_poison_kfree(element);
+		kasan_poison_kfree(element, _RET_IP_);
 	if (pool->alloc == mempool_alloc_pages)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
@@ -119,7 +119,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
 		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }
 
-static void add_element(mempool_t *pool, void *element)
+static __always_inline void add_element(mempool_t *pool, void *element)
 {
 	BUG_ON(pool->curr_nr >= pool->min_nr);
 	poison_element(pool, element);
-- 
2.15.1.620.gb9897f4670-goog
