commit 589aa8077dcf413b06c78c6d8095496c98720c25
Author:     Stanislav Fomichev
AuthorDate: Tue Dec 20 11:51:17 2022 -0800
Commit:     Stanislav Fomichev
CommitDate: Tue Dec 20 11:51:17 2022 -0800

    Revert "mm: Make ksize() a reporting-only function"

    This reverts commit 38931d8989b5760b0bd17c9ec99e81986258e4cb.

diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 73684642c42d..0d59098f0876 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -783,30 +783,23 @@ static void kasan_global_oob_left(struct kunit *test)
 	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
 }
 
-/* Check that ksize() does NOT unpoison whole object. */
+/* Check that ksize() makes the whole object accessible. */
 static void ksize_unpoisons_memory(struct kunit *test)
 {
 	char *ptr;
-	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
-	size_t real_size;
+	size_t size = 123, real_size;
 
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
 	real_size = ksize(ptr);
-	KUNIT_EXPECT_GT(test, real_size, size);
 
 	OPTIMIZER_HIDE_VAR(ptr);
 
-	/* These accesses shouldn't trigger a KASAN report. */
-	ptr[0] = 'x';
-	ptr[size - 1] = 'x';
+	/* This access shouldn't trigger a KASAN report. */
+	ptr[size] = 'x';
 
-	/* These must trigger a KASAN report. */
-	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
-		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
-	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
-	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
+	/* This one must. */
+	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
 
 	kfree(ptr);
 }
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7e96abf1bd7d..33b1886b06eb 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1333,11 +1333,11 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
 	void *ret;
 	size_t ks;
 
-	/* Check for double-free before calling ksize. */
+	/* Don't use instrumented ksize to allow precise KASAN poisoning. */
 	if (likely(!ZERO_OR_NULL_PTR(p))) {
 		if (!kasan_check_byte(p))
 			return NULL;
-		ks = ksize(p);
+		ks = kfence_ksize(p) ?: __ksize(p);
 	} else
 		ks = 0;
 
@@ -1405,10 +1405,8 @@ void kfree_sensitive(const void *p)
 	void *mem = (void *)p;
 
 	ks = ksize(mem);
-	if (ks) {
-		kasan_unpoison_range(mem, ks);
+	if (ks)
 		memzero_explicit(mem, ks);
-	}
 	kfree(mem);
 }
 EXPORT_SYMBOL(kfree_sensitive);
@@ -1429,11 +1427,13 @@ EXPORT_SYMBOL(kfree_sensitive);
  */
 size_t ksize(const void *objp)
 {
+	size_t size;
+
 	/*
-	 * We need to first check that the pointer to the object is valid.
-	 * The KASAN report printed from ksize() is more useful, then when
-	 * it's printed later when the behaviour could be undefined due to
-	 * a potential use-after-free or double-free.
+	 * We need to first check that the pointer to the object is valid, and
+	 * only then unpoison the memory. The report printed from ksize() is
+	 * more useful, then when it's printed later when the behaviour could
+	 * be undefined due to a potential use-after-free or double-free.
 	 *
 	 * We use kasan_check_byte(), which is supported for the hardware
 	 * tag-based KASAN mode, unlike kasan_check_read/write().
@@ -1447,7 +1447,13 @@ size_t ksize(const void *objp)
 	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
 		return 0;
 
-	return kfence_ksize(objp) ?: __ksize(objp);
+	size = kfence_ksize(objp) ?: __ksize(objp);
+	/*
+	 * We assume that ksize callers could use whole allocated area,
+	 * so we need to unpoison this area.
+	 */
+	kasan_unpoison_range(objp, size);
+	return size;
 }
 EXPORT_SYMBOL(ksize);
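
For illustration only (not part of the commit): a minimal sketch of the caller-visible behaviour this revert restores. With the revert applied, ksize() again unpoisons the whole allocated area, so a caller may use every byte up to the size ksize() reports without triggering a KASAN report. The function name and the requested size below are hypothetical.

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    static int ksize_full_use_example(void)
    {
            size_t want = 123;      /* arbitrary request size for illustration */
            size_t usable;
            char *buf;

            buf = kmalloc(want, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            /*
             * ksize() reports the real allocation size (e.g. 128 bytes for a
             * 123-byte request on a kmalloc-128 cache) and, after this revert,
             * unpoisons that whole range. Touching bytes past 'want' but below
             * 'usable' is therefore legitimate and KASAN-clean, which is the
             * pattern kfree_sensitive() relies on when it zeroes 'ks' bytes.
             */
            usable = ksize(buf);
            memset(buf, 0, usable);

            kfree(buf);
            return 0;
    }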