Message-ID: <YBq3uZOKeRnW3eBl@elver.google.com>
Date: Wed, 3 Feb 2021 15:48:25 +0100
From: Marco Elver <elver@...gle.com>
To: Andrey Konovalov <andreyknvl@...gle.com>
Cc: Catalin Marinas <catalin.marinas@....com>,
Vincenzo Frascino <vincenzo.frascino@....com>,
Dmitry Vyukov <dvyukov@...gle.com>,
Alexander Potapenko <glider@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Will Deacon <will.deacon@....com>,
Andrey Ryabinin <aryabinin@...tuozzo.com>,
Peter Collingbourne <pcc@...gle.com>,
Evgenii Stepanov <eugenis@...gle.com>,
Branislav Rankov <Branislav.Rankov@....com>,
Kevin Brodsky <kevin.brodsky@....com>,
kasan-dev@...glegroups.com, linux-arm-kernel@...ts.infradead.org,
linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 06/12] kasan: rework krealloc tests
On Mon, Feb 01, 2021 at 08:43PM +0100, Andrey Konovalov wrote:
> This patch reworks KASAN-KUnit tests for krealloc() to:
>
> 1. Check both slab and page_alloc based krealloc() implementations.
> 2. Allow at least one full granule to fit between old and new sizes for
> each KASAN mode, and check accesses to that granule accordingly.
>
> Signed-off-by: Andrey Konovalov <andreyknvl@...gle.com>
Reviewed-by: Marco Elver <elver@...gle.com>
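
As a quick cross-check of the "at least one full granule" claim, here is
a minimal userspace sketch of the size arithmetic (round_up()/round_down()
redefined by hand to mirror the kernel macros; 201/235 are the constants
the tests below use -- none of this is patch code):

#include <assert.h>
#include <stddef.h>

/* Hand-rolled equivalents of the kernel's round_up()/round_down()
 * for power-of-two alignment. */
#define round_up(x, y)   ((((x) - 1) | ((y) - 1)) + 1)
#define round_down(x, y) ((x) & ~((y) - 1))

int main(void)
{
	size_t size1 = 201, size2 = 235;
	size_t middle = size1 + (size2 - size1) / 2;	/* 218 */

	/* 8-byte granules (generic mode), 16-byte (tag-based modes). */
	for (size_t g = 8; g <= 16; g *= 2) {
		/* At least one full granule fits between the two sizes. */
		assert(round_up(size1, g) + g <= size2);
		/* middle lands in a granule separate from both sizes,
		 * which the reworked helpers rely on. */
		assert(round_up(size1, g) <= round_down(middle, g));
		assert(round_up(middle, g) <= round_down(size2, g));
	}
	return 0;
}
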
> ---
> lib/test_kasan.c | 91 ++++++++++++++++++++++++++++++++++++++++++------
> 1 file changed, 81 insertions(+), 10 deletions(-)
>
> diff --git a/lib/test_kasan.c b/lib/test_kasan.c
> index 5699e43ca01b..2bb52853f341 100644
> --- a/lib/test_kasan.c
> +++ b/lib/test_kasan.c
> @@ -258,11 +258,14 @@ static void kmalloc_large_oob_right(struct kunit *test)
> kfree(ptr);
> }
>
> -static void kmalloc_oob_krealloc_more(struct kunit *test)
> +static void krealloc_more_oob_helper(struct kunit *test,
> + size_t size1, size_t size2)
> {
> char *ptr1, *ptr2;
> - size_t size1 = 17;
> - size_t size2 = 19;
> + size_t middle;
> +
> + KUNIT_ASSERT_LT(test, size1, size2);
> + middle = size1 + (size2 - size1) / 2;
>
> ptr1 = kmalloc(size1, GFP_KERNEL);
> KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
> @@ -270,15 +273,31 @@ static void kmalloc_oob_krealloc_more(struct kunit *test)
> ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
> KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
>
> - KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
> + /* All offsets up to size2 must be accessible. */
> + ptr2[size1 - 1] = 'x';
> + ptr2[size1] = 'x';
> + ptr2[middle] = 'x';
> + ptr2[size2 - 1] = 'x';
> +
> + /* Generic mode is precise, so unaligned size2 must be inaccessible. */
> + if (IS_ENABLED(CONFIG_KASAN_GENERIC))
> + KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
> +
> +	/* For all modes, the first aligned offset after size2 must be inaccessible. */
> + KUNIT_EXPECT_KASAN_FAIL(test,
> + ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
> +
> kfree(ptr2);
> }
>
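
(For someone reading the test later: "precise" here is the generic mode's
shadow encoding -- one shadow byte per 8-byte granule, where 0 means the
granule is fully accessible and 1..7 means only that many leading bytes
are. A sketch in my words, not anything from this patch:)

#include <stddef.h>

/* Hypothetical helper, for illustration only: the last shadow byte of
 * an allocation of `size` bytes under generic KASAN is 0 if the final
 * 8-byte granule is fully accessible, otherwise the number of
 * accessible leading bytes. For size2 = 235, 235 % 8 == 3, so the
 * access to ptr2[235] exceeds the 3-byte limit and is reported. The
 * tag-based modes tag whole 16-byte granules and so cannot flag an
 * unaligned size2. */
static unsigned char last_shadow_byte(size_t size)
{
	return size % 8;
}
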
> -static void kmalloc_oob_krealloc_less(struct kunit *test)
> +static void krealloc_less_oob_helper(struct kunit *test,
> + size_t size1, size_t size2)
> {
> char *ptr1, *ptr2;
> - size_t size1 = 17;
> - size_t size2 = 15;
> + size_t middle;
> +
> + KUNIT_ASSERT_LT(test, size2, size1);
> + middle = size2 + (size1 - size2) / 2;
>
> ptr1 = kmalloc(size1, GFP_KERNEL);
> KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
> @@ -286,10 +305,60 @@ static void kmalloc_oob_krealloc_less(struct kunit *test)
> ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
> KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
>
> - KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
> + /* Must be accessible for all modes. */
> + ptr2[size2 - 1] = 'x';
> +
> + /* Generic mode is precise, so unaligned size2 must be inaccessible. */
> + if (IS_ENABLED(CONFIG_KASAN_GENERIC))
> + KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
> +
> +	/* For all modes, the first aligned offset after size2 must be inaccessible. */
> + KUNIT_EXPECT_KASAN_FAIL(test,
> + ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
> +
> + /*
> + * For all modes both middle and size1 should land in separate granules

middle, size1, and size2? The first KUNIT_EXPECT_LE below also bounds
round_up(size2, KASAN_GRANULE_SIZE), so size2's granule is part of the
separation being asserted.

> + * and thus be inaccessible.
> + */
> + KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
> + round_down(middle, KASAN_GRANULE_SIZE));
> + KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
> + round_down(size1, KASAN_GRANULE_SIZE));
> + KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
> + KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
> + KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');
> +
> kfree(ptr2);
> }
>
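
Also worth noting: these two KUNIT_EXPECT_LE checks are exactly tight for
the constants used below (size2 = 201, size1 = 235, middle = 218). With
8-byte granules, round_up/round_down give 208 <= 216 and 224 <= 232; with
the tag-based modes' 16-byte granules both hold with equality, 208 <= 208
and 224 <= 224, so middle and size1 each still get their own granule,
with no slack to spare.
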
> +static void krealloc_more_oob(struct kunit *test)
> +{
> + krealloc_more_oob_helper(test, 201, 235);
> +}
> +
> +static void krealloc_less_oob(struct kunit *test)
> +{
> + krealloc_less_oob_helper(test, 235, 201);
> +}
> +
> +static void krealloc_pagealloc_more_oob(struct kunit *test)
> +{
> +	/* page_alloc fallback is only implemented for SLUB. */
> + KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
> +
> + krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
> + KMALLOC_MAX_CACHE_SIZE + 235);
> +}
> +
> +static void krealloc_pagealloc_less_oob(struct kunit *test)
> +{
> +	/* page_alloc fallback is only implemented for SLUB. */
> + KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
> +
> + krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
> + KMALLOC_MAX_CACHE_SIZE + 201);
> +}
> +
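
On the two pagealloc variants: with SLUB, allocations above
KMALLOC_MAX_CACHE_SIZE bypass the kmalloc caches and come straight from
the page allocator, so these sizes make krealloc() exercise KASAN's
page_alloc poisoning rather than the slab path -- which is what point 1
of the changelog asks for. A sketch of the size choice (kernel context;
the helper name is mine, not an existing API):

#include <linux/slab.h>

/* Hypothetical helper, for illustration only: true if `size` is served
 * by the page allocator rather than a kmalloc cache under SLUB. */
static bool krealloc_uses_page_alloc(size_t size)
{
	return size > KMALLOC_MAX_CACHE_SIZE;
}

Both tests keep the 201/235 offsets, just shifted past the largest
kmalloc cache, so the granule arithmetic above carries over unchanged.
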
> static void kmalloc_oob_16(struct kunit *test)
> {
> struct {
> @@ -983,8 +1052,10 @@ static struct kunit_case kasan_kunit_test_cases[] = {
> KUNIT_CASE(pagealloc_oob_right),
> KUNIT_CASE(pagealloc_uaf),
> KUNIT_CASE(kmalloc_large_oob_right),
> - KUNIT_CASE(kmalloc_oob_krealloc_more),
> - KUNIT_CASE(kmalloc_oob_krealloc_less),
> + KUNIT_CASE(krealloc_more_oob),
> + KUNIT_CASE(krealloc_less_oob),
> + KUNIT_CASE(krealloc_pagealloc_more_oob),
> + KUNIT_CASE(krealloc_pagealloc_less_oob),
> KUNIT_CASE(kmalloc_oob_16),
> KUNIT_CASE(kmalloc_uaf_16),
> KUNIT_CASE(kmalloc_oob_in_memset),
> --
> 2.30.0.365.g02bc693789-goog
>