Message-ID: <230127af-6c71-e51e-41a4-aa9547c2c847@suse.cz>
Date: Fri, 18 Nov 2022 11:32:36 +0100
From: Vlastimil Babka <vbabka@...e.cz>
To: Kees Cook <keescook@...omium.org>,
Andrey Konovalov <andreyknvl@...il.com>
Cc: Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Andrew Morton <akpm@...ux-foundation.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Hyeonggon Yoo <42.hyeyoo@...il.com>,
Andrey Ryabinin <ryabinin.a.a@...il.com>,
Alexander Potapenko <glider@...gle.com>,
Dmitry Vyukov <dvyukov@...gle.com>,
Vincenzo Frascino <vincenzo.frascino@....com>,
linux-mm@...ck.org, kasan-dev@...glegroups.com,
linux-kernel@...r.kernel.org, linux-hardening@...r.kernel.org
Subject: Re: [PATCH v2] mm: Make ksize() a reporting-only function
On 11/18/22 04:56, Kees Cook wrote:
> With all "silently resizing" callers of ksize() refactored, remove the

At a cursory look it seems that's true now in -next (but not in mainline?),
can you confirm?

If so, it would probably be safe enough to have slab.git expose this to -next
now and time a PR appropriately in the next merge window?

> logic in ksize() that would allow it to be used to effectively change
> the size of an allocation (bypassing __alloc_size hints, etc). Users
> wanting this feature need to either use kmalloc_size_roundup() before an
> allocation, or use krealloc() directly.
>
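Just to make the intended caller-side change concrete for anyone skimming the
thread (my sketch, not part of the patch; example_buf_alloc() and its
parameters are made up):

	/* Assumes <linux/slab.h> for kmalloc_size_roundup()/kmalloc(). */
	static void *example_buf_alloc(size_t *lenp, gfp_t gfp)
	{
		/*
		 * Previously a caller could do:
		 *     buf = kmalloc(*lenp, gfp);
		 *     *lenp = ksize(buf);   (silently grow into the slack)
		 * which bypasses the __alloc_size hint. The rounding is now
		 * done explicitly before the allocation instead:
		 */
		*lenp = kmalloc_size_roundup(*lenp);
		return kmalloc(*lenp, gfp);
	}

That way the slack is visible to the caller (and to __alloc_size) at
allocation time rather than being discovered afterwards via ksize().
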
> For kfree_sensitive(), move the unpoisoning logic inline. Replace the
> partially open-coded ksize() in __do_krealloc with ksize() now that it
> doesn't perform unpoisoning.
>
> Adjust the KUnit tests to match the new ksize() behavior.
>
> Cc: Andrey Konovalov <andreyknvl@...il.com>
> Cc: Christoph Lameter <cl@...ux.com>
> Cc: Pekka Enberg <penberg@...nel.org>
> Cc: David Rientjes <rientjes@...gle.com>
> Cc: Joonsoo Kim <iamjoonsoo.kim@....com>
> Cc: Andrew Morton <akpm@...ux-foundation.org>
> Cc: Roman Gushchin <roman.gushchin@...ux.dev>
> Cc: Hyeonggon Yoo <42.hyeyoo@...il.com>
> Cc: Andrey Ryabinin <ryabinin.a.a@...il.com>
> Cc: Alexander Potapenko <glider@...gle.com>
> Cc: Dmitry Vyukov <dvyukov@...gle.com>
> Cc: Vincenzo Frascino <vincenzo.frascino@....com>
> Cc: linux-mm@...ck.org
> Cc: kasan-dev@...glegroups.com
> Acked-by: Vlastimil Babka <vbabka@...e.cz>
> Signed-off-by: Kees Cook <keescook@...omium.org>
> ---
> v2:
> - improve kunit test precision (andreyknvl)
> - add Ack (vbabka)
> v1: https://lore.kernel.org/all/20221022180455.never.023-kees@kernel.org
> ---
> mm/kasan/kasan_test.c | 14 +++++++++-----
> mm/slab_common.c | 26 ++++++++++----------------
> 2 files changed, 19 insertions(+), 21 deletions(-)
>
> diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
> index 7502f03c807c..fc4b22916587 100644
> --- a/mm/kasan/kasan_test.c
> +++ b/mm/kasan/kasan_test.c
> @@ -821,7 +821,7 @@ static void kasan_global_oob_left(struct kunit *test)
> KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
> }
>
> -/* Check that ksize() makes the whole object accessible. */
> +/* Check that ksize() does NOT unpoison whole object. */
> static void ksize_unpoisons_memory(struct kunit *test)
> {
> char *ptr;
> @@ -829,15 +829,19 @@ static void ksize_unpoisons_memory(struct kunit *test)
>
> ptr = kmalloc(size, GFP_KERNEL);
> KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> +
> real_size = ksize(ptr);
> + KUNIT_EXPECT_GT(test, real_size, size);
>
> OPTIMIZER_HIDE_VAR(ptr);
>
> - /* This access shouldn't trigger a KASAN report. */
> - ptr[size] = 'x';
> + /* These accesses shouldn't trigger a KASAN report. */
> + ptr[0] = 'x';
> + ptr[size - 1] = 'x';
>
> - /* This one must. */
> - KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
> + /* These must trigger a KASAN report. */
> + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
> + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
>
> kfree(ptr);
> }
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 8276022f0da4..27caa57af070 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1335,11 +1335,11 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
> void *ret;
> size_t ks;
>
> - /* Don't use instrumented ksize to allow precise KASAN poisoning. */
> + /* Check for double-free before calling ksize. */
> if (likely(!ZERO_OR_NULL_PTR(p))) {
> if (!kasan_check_byte(p))
> return NULL;
> - ks = kfence_ksize(p) ?: __ksize(p);
> + ks = ksize(p);
> } else
> ks = 0;
>
> @@ -1407,21 +1407,21 @@ void kfree_sensitive(const void *p)
> void *mem = (void *)p;
>
> ks = ksize(mem);
> - if (ks)
> + if (ks) {
> + kasan_unpoison_range(mem, ks);
> memzero_explicit(mem, ks);
> + }
> kfree(mem);
> }
> EXPORT_SYMBOL(kfree_sensitive);
>
> size_t ksize(const void *objp)
> {
> - size_t size;
> -
> /*
> - * We need to first check that the pointer to the object is valid, and
> - * only then unpoison the memory. The report printed from ksize() is
> - * more useful, then when it's printed later when the behaviour could
> - * be undefined due to a potential use-after-free or double-free.
> + * We need to first check that the pointer to the object is valid.
> + * The KASAN report printed from ksize() is more useful than when
> + * it's printed later when the behaviour could be undefined due to
> + * a potential use-after-free or double-free.
> *
> * We use kasan_check_byte(), which is supported for the hardware
> * tag-based KASAN mode, unlike kasan_check_read/write().
> @@ -1435,13 +1435,7 @@ size_t ksize(const void *objp)
> if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
> return 0;
>
> - size = kfence_ksize(objp) ?: __ksize(objp);
> - /*
> - * We assume that ksize callers could use whole allocated area,
> - * so we need to unpoison this area.
> - */
> - kasan_unpoison_range(objp, size);
> - return size;
> + return kfence_ksize(objp) ?: __ksize(objp);
> }
> EXPORT_SYMBOL(ksize);
>
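To spell out the resulting contract for remaining ksize() users (again just a
sketch from me, not part of the patch; example_report() is made up): ksize()
stays fine for reporting/accounting, but the slack beyond the requested size
is no longer unpoisoned, so stray accesses into it can now be caught by KASAN.

	/* ksize() as a reporting-only helper. */
	static size_t example_report(const void *obj, size_t requested)
	{
		size_t ks = ksize(obj);	/* no longer unpoisons [requested, ks) */

		/*
		 * Touching the slack, e.g. ((char *)obj)[requested], is no
		 * longer sanctioned; callers that really need the extra room
		 * should use kmalloc_size_roundup() or krealloc() instead.
		 */
		return ks;
	}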