Message-ID: <574F0BB6.1040400@virtuozzo.com>
Date: Wed, 1 Jun 2016 19:22:14 +0300
From: Andrey Ryabinin <aryabinin@...tuozzo.com>
To: Alexander Potapenko <glider@...gle.com>, <adech.fo@...il.com>,
<cl@...ux.com>, <dvyukov@...gle.com>, <akpm@...ux-foundation.org>,
<rostedt@...dmis.org>, <iamjoonsoo.kim@....com>,
<js1304@...il.com>, <kcc@...gle.com>, <kuthonuzo.luruo@....com>
CC: <kasan-dev@...glegroups.com>, <linux-mm@...ck.org>,
<linux-kernel@...r.kernel.org>
Subject: Re: [PATCH] mm: kasan: don't touch metadata in kasan_[un]poison_element()
On 06/01/2016 03:53 PM, Alexander Potapenko wrote:
> To avoid draining the mempools, KASAN shouldn't put the mempool elements
> into the quarantine upon mempool_free().
Correct, but unfortunately this patch doesn't fix that (see my comment at the bottom).
> It shouldn't store
> allocation/deallocation stacks upon mempool_alloc()/mempool_free() either.
Why not?
> Therefore make kasan_[un]poison_element() just change the shadow memory,
> not the metadata.
>
> Signed-off-by: Alexander Potapenko <glider@...gle.com>
> Reported-by: Kuthonuzo Luruo <kuthonuzo.luruo@....com>
> ---
[...]
> +void kasan_slab_alloc(struct kmem_cache *cache, void *object,
> +			bool just_unpoison, gfp_t flags)
>  {
> -	kasan_kmalloc(cache, object, cache->object_size, flags);
> +	if (just_unpoison)
This is set to 'false' at all call sites, so the 'true' branch appears to be dead code.
> +		kasan_unpoison_shadow(object, cache->object_size);
> +	else
> +		kasan_kmalloc(cache, object, cache->object_size, flags);
>  }
>
>  void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
> @@ -611,6 +615,31 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
>  			KASAN_PAGE_REDZONE);
>  }
>
> +void kasan_unpoison_kmalloc(const void *object, size_t size, gfp_t flags)
> +{
> +	struct page *page;
> +	unsigned long redzone_start;
> +	unsigned long redzone_end;
> +
> +	if (unlikely(object == ZERO_SIZE_PTR) || (object == NULL))
> +		return;
> +
> +	page = virt_to_head_page(object);
> +	redzone_start = round_up((unsigned long)(object + size),
> +				KASAN_SHADOW_SCALE_SIZE);
> +
> +	if (unlikely(!PageSlab(page)))
> +		redzone_end = (unsigned long)object +
> +			(PAGE_SIZE << compound_order(page));
> +	else
> +		redzone_end = round_up(
> +			(unsigned long)object + page->slab_cache->object_size,
> +			KASAN_SHADOW_SCALE_SIZE);
> +	kasan_unpoison_shadow(object, size);
> +	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
> +			KASAN_KMALLOC_REDZONE);
> +}
> +
>  void kasan_krealloc(const void *object, size_t size, gfp_t flags)
>  {
>  	struct page *page;
> @@ -636,7 +665,20 @@ void kasan_kfree(void *ptr)
>  		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
>  				KASAN_FREE_PAGE);
>  	else
> -		kasan_slab_free(page->slab_cache, ptr);
> +		kasan_poison_slab_free(page->slab_cache, ptr);
>  }
> +
> +void kasan_poison_kfree(void *ptr)
Unused
> +{
> +	struct page *page;
> +
> +	page = virt_to_head_page(ptr);
> +
> +	if (unlikely(!PageSlab(page)))
> +		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
> +				KASAN_FREE_PAGE);
> +	else
> +		kasan_poison_slab_free(page->slab_cache, ptr);
> +}
>
>  void kasan_kfree_large(const void *ptr)
> diff --git a/mm/mempool.c b/mm/mempool.c
> index 9e075f8..bcd48c6 100644
> --- a/mm/mempool.c
> +++ b/mm/mempool.c
> @@ -115,9 +115,10 @@ static void kasan_poison_element(mempool_t *pool, void *element)
>  static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
>  {
>  	if (pool->alloc == mempool_alloc_slab)
> -		kasan_slab_alloc(pool->pool_data, element, flags);
> +		kasan_slab_alloc(pool->pool_data, element,
> +				/*just_unpoison*/ false, flags);
>  	if (pool->alloc == mempool_kmalloc)
> -		kasan_krealloc(element, (size_t)pool->pool_data, flags);
> +		kasan_unpoison_kmalloc(element, (size_t)pool->pool_data, flags);
I think the current code here is fine.
We only need to fix kasan_poison_element(), which calls kasan_kfree(), which puts objects into the quarantine.
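Something along these lines, perhaps (just a sketch, not tested; it assumes we keep kasan_poison_kfree() from your patch, which is currently unused, and make it callable from mm/mempool.c):

	static void kasan_poison_element(mempool_t *pool, void *element)
	{
		/* Slab pool: poison the object's shadow but skip the
		 * quarantine, so the element stays available to the pool. */
		if (pool->alloc == mempool_alloc_slab)
			kasan_poison_slab_free(pool->pool_data, element);
		/* kmalloc pool: poison without quarantining, unlike
		 * kasan_kfree(). */
		if (pool->alloc == mempool_kmalloc)
			kasan_poison_kfree(element);
	}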