Date:   Fri, 16 Oct 2020 22:05:20 +0200
From:   Andrey Konovalov <andreyknvl@...gle.com>
To:     Andrew Morton <akpm@...ux-foundation.org>
Cc:     Andrey Ryabinin <aryabinin@...tuozzo.com>,
        Alexander Potapenko <glider@...gle.com>,
        Dmitry Vyukov <dvyukov@...gle.com>,
        Marco Elver <elver@...gle.com>,
        Vincenzo Frascino <vincenzo.frascino@....com>,
        David Gow <davidgow@...gle.com>,
        kasan-dev <kasan-dev@...glegroups.com>,
        Linux Memory Management List <linux-mm@...ck.org>,
        LKML <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH] kasan: adopt KUNIT tests to SW_TAGS mode

On Fri, Oct 16, 2020 at 9:33 PM Andrey Konovalov <andreyknvl@...gle.com> wrote:
>
> Now that we have KASAN-KUnit test integration, it's easy to see that
> some KASAN tests are not adapted to the SW_TAGS mode and are failing.
>
> Adjust the allocation size for kasan_memchr() and kasan_memcmp() by
> rounding it up to OOB_TAG_OFF so the bad access ends up in a separate
> memory granule.
>
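To spell out why the rounding matters: SW_TAGS tracks memory in 16-byte
granules and only reports an access once it crosses into a granule that
carries a different tag. A minimal user-space sketch of the arithmetic
(GRANULE here is a hypothetical stand-in, not a kernel macro; the patch
uses round_up(size, OOB_TAG_OFF) for the same purpose):

	#include <stddef.h>

	#define GRANULE 16	/* hypothetical stand-in for the 16-byte SW_TAGS granule */

	/* Same result as round_up(size, GRANULE) for a power-of-two granule. */
	static size_t round_up_granule(size_t size)
	{
		return (size + GRANULE - 1) & ~(size_t)(GRANULE - 1);
	}

	/*
	 * round_up_granule(24) == 32: once the allocation size is a granule
	 * multiple, the first byte past the buffer lies in the next granule,
	 * which carries a different tag, so the bad access is reported.
	 */
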
> Add new kmalloc_uaf_16() and kasan_bitops_uaf() tests that rely on UAFs,
> as it's hard to adapt the existing kmalloc_oob_16() and kasan_bitops_oob()
> (renamed from kasan_bitops()) without losing precision.
>
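For kmalloc_oob_16() in particular, the original allocation of
sizeof(*ptr1) - 3 = 13 bytes still occupies a full 16-byte granule under
SW_TAGS, so the 16-byte struct copy never touches memory with a different
tag and no report is produced (the generic mode tracks the exact object
size, so the copy past byte 13 is caught there). A use-after-free avoids
that limitation: once the object is freed its memory is retagged, so an
access through the stale pointer mismatches regardless of size or offset.
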
> Disable kasan_global_oob() and kasan_alloca_oob_left/right() as the SW_TAGS
> mode doesn't instrument globals or dynamic allocas.
>
> Signed-off-by: Andrey Konovalov <andreyknvl@...gle.com>
> ---
>  lib/test_kasan.c | 144 ++++++++++++++++++++++++++++++++---------------
>  1 file changed, 99 insertions(+), 45 deletions(-)
>
> diff --git a/lib/test_kasan.c b/lib/test_kasan.c
> index 63c26171a791..3bff25a7fdcc 100644
> --- a/lib/test_kasan.c
> +++ b/lib/test_kasan.c
> @@ -216,6 +216,12 @@ static void kmalloc_oob_16(struct kunit *test)
>                 u64 words[2];
>         } *ptr1, *ptr2;
>
> +       /* This test is specifically crafted for the generic mode. */
> +       if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> +               kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
> +               return;
> +       }
> +
>         ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
>         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
>
> @@ -227,6 +233,23 @@ static void kmalloc_oob_16(struct kunit *test)
>         kfree(ptr2);
>  }
>
> +static void kmalloc_uaf_16(struct kunit *test)
> +{
> +       struct {
> +               u64 words[2];
> +       } *ptr1, *ptr2;
> +
> +       ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
> +       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
> +
> +       ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
> +       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
> +       kfree(ptr2);
> +
> +       KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
> +       kfree(ptr1);
> +}
> +
>  static void kmalloc_oob_memset_2(struct kunit *test)
>  {
>         char *ptr;
> @@ -429,6 +452,12 @@ static void kasan_global_oob(struct kunit *test)
>         volatile int i = 3;
>         char *p = &global_array[ARRAY_SIZE(global_array) + i];
>
> +       /* Only generic mode instruments globals. */
> +       if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> +               kunit_info(test, "CONFIG_KASAN_GENERIC required");
> +               return;
> +       }
> +
>         KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
>  }
>
> @@ -467,6 +496,12 @@ static void kasan_alloca_oob_left(struct kunit *test)
>         char alloca_array[i];
>         char *p = alloca_array - 1;
>
> +       /* Only generic mode instruments dynamic allocas. */
> +       if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> +               kunit_info(test, "CONFIG_KASAN_GENERIC required");
> +               return;
> +       }
> +
>         if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
>                 kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
>                 return;
> @@ -481,6 +516,12 @@ static void kasan_alloca_oob_right(struct kunit *test)
>         char alloca_array[i];
>         char *p = alloca_array + i;
>
> +       /* Only generic mode instruments dynamic allocas. */
> +       if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> +               kunit_info(test, "CONFIG_KASAN_GENERIC required");
> +               return;
> +       }
> +
>         if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
>                 kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
>                 return;
> @@ -551,6 +592,9 @@ static void kasan_memchr(struct kunit *test)
>                 return;
>         }
>
> +       if (OOB_TAG_OFF)
> +               size = round_up(size, OOB_TAG_OFF);
> +
>         ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
>         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
>
> @@ -573,6 +617,9 @@ static void kasan_memcmp(struct kunit *test)
>                 return;
>         }
>
> +       if (OOB_TAG_OFF)
> +               size = round_up(size, OOB_TAG_OFF);
> +
>         ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
>         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
>         memset(arr, 0, sizeof(arr));
> @@ -619,13 +666,50 @@ static void kasan_strings(struct kunit *test)
>         KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
>  }
>
> -static void kasan_bitops(struct kunit *test)
> +static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
> +{
> +       KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
> +       KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
> +       KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
> +       KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
> +       KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
> +       KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
> +       KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
> +       KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
> +}
> +
> +static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
>  {
> +       KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
> +       KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
> +       KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
> +       KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
> +       KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
> +       KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
> +       KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
> +       KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
> +
> +#if defined(clear_bit_unlock_is_negative_byte)
> +       KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
> +                               clear_bit_unlock_is_negative_byte(nr, addr));
> +#endif
> +}
> +
> +static void kasan_bitops_oob(struct kunit *test)
> +{
> +       long *bits;
> +
> +       /* This test is specifically crafted for the generic mode. */
> +       if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> +               kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
> +               return;
> +       }
> +
>         /*
>          * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
>          * this way we do not actually corrupt other memory.
>          */
> -       long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
> +       bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
>         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
>
>         /*
> @@ -633,56 +717,24 @@ static void kasan_bitops(struct kunit *test)
>          * below accesses are still out-of-bounds, since bitops are defined to
>          * operate on the whole long the bit is in.
>          */
> -       KUNIT_EXPECT_KASAN_FAIL(test, set_bit(BITS_PER_LONG, bits));
> -
> -       KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(BITS_PER_LONG, bits));
> -
> -       KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(BITS_PER_LONG, bits));
> -
> -       KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(BITS_PER_LONG, bits));
> -
> -       KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(BITS_PER_LONG, bits));
> -
> -       KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(BITS_PER_LONG, bits));
> -
> -       KUNIT_EXPECT_KASAN_FAIL(test, change_bit(BITS_PER_LONG, bits));
> -
> -       KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(BITS_PER_LONG, bits));
> +       kasan_bitops_modify(test, BITS_PER_LONG, bits);
>
>         /*
>          * Below calls try to access bit beyond allocated memory.
>          */
> -       KUNIT_EXPECT_KASAN_FAIL(test,
> -               test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
> -
> -       KUNIT_EXPECT_KASAN_FAIL(test,
> -               __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
> +       kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
>
> -       KUNIT_EXPECT_KASAN_FAIL(test,
> -               test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits));
> -
> -       KUNIT_EXPECT_KASAN_FAIL(test,
> -               test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
> -
> -       KUNIT_EXPECT_KASAN_FAIL(test,
> -               __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
> -
> -       KUNIT_EXPECT_KASAN_FAIL(test,
> -               test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
> -
> -       KUNIT_EXPECT_KASAN_FAIL(test,
> -               __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
> +       kfree(bits);
> +}
>
> -       KUNIT_EXPECT_KASAN_FAIL(test,
> -               kasan_int_result =
> -                       test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
> +static void kasan_bitops_uaf(struct kunit *test)
> +{
> +       long *bits = kzalloc(sizeof(*bits), GFP_KERNEL);
>
> -#if defined(clear_bit_unlock_is_negative_byte)
> -       KUNIT_EXPECT_KASAN_FAIL(test,
> -               kasan_int_result = clear_bit_unlock_is_negative_byte(
> -                       BITS_PER_LONG + BITS_PER_BYTE, bits));
> -#endif
> +       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
>         kfree(bits);
> +       kasan_bitops_modify(test, BITS_PER_LONG, bits);
> +       kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
>  }

This actually ends up modifying the data in a freed object, which
isn't a good idea. I'll change this to do an OOB too, but for the
tag-based mode.
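
Roughly what I have in mind (untested sketch; the kasan_bitops_tags() name
and the +16 offset are placeholders, assuming kmalloc rounds the object up
to the 16-byte granule): keep the structure of kasan_bitops_uaf(), but skip
the kfree() and point the helpers just past the allocated object, so the
accesses are out-of-bounds rather than into freed memory, and gate the test
on the tag-based mode:

	static void kasan_bitops_tags(struct kunit *test)
	{
		long *bits;

		/* This test is specifically crafted for the tag-based mode. */
		if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
			kunit_info(test, "CONFIG_KASAN_SW_TAGS required\n");
			return;
		}

		/* The allocation is expected to be rounded up to a full 16-byte granule. */
		bits = kzalloc(sizeof(*bits), GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

		/* Do the accesses past the allocated object. */
		kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 16);
		kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE,
					     (void *)bits + 16);

		kfree(bits);
	}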

>
>  static void kmalloc_double_kzfree(struct kunit *test)
> @@ -728,6 +780,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
>         KUNIT_CASE(kmalloc_oob_krealloc_more),
>         KUNIT_CASE(kmalloc_oob_krealloc_less),
>         KUNIT_CASE(kmalloc_oob_16),
> +       KUNIT_CASE(kmalloc_uaf_16),
>         KUNIT_CASE(kmalloc_oob_in_memset),
>         KUNIT_CASE(kmalloc_oob_memset_2),
>         KUNIT_CASE(kmalloc_oob_memset_4),
> @@ -751,7 +804,8 @@ static struct kunit_case kasan_kunit_test_cases[] = {
>         KUNIT_CASE(kasan_memchr),
>         KUNIT_CASE(kasan_memcmp),
>         KUNIT_CASE(kasan_strings),
> -       KUNIT_CASE(kasan_bitops),
> +       KUNIT_CASE(kasan_bitops_oob),
> +       KUNIT_CASE(kasan_bitops_uaf),
>         KUNIT_CASE(kmalloc_double_kzfree),
>         KUNIT_CASE(vmalloc_oob),
>         {}
> --
> 2.29.0.rc1.297.gfa9743e501-goog
>
